// src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp
/*1* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.2* Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.3* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.4*5* This code is free software; you can redistribute it and/or modify it6* under the terms of the GNU General Public License version 2 only, as7* published by the Free Software Foundation.8*9* This code is distributed in the hope that it will be useful, but WITHOUT10* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or11* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License12* version 2 for more details (a copy is included in the LICENSE file that13* accompanied this code).14*15* You should have received a copy of the GNU General Public License version16* 2 along with this work; if not, write to the Free Software Foundation,17* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.18*19* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA20* or visit www.oracle.com if you need additional information or have any21* questions.22*23*/2425#ifndef OS_CPU_LINUX_ZERO_ATOMIC_LINUX_ZERO_HPP26#define OS_CPU_LINUX_ZERO_ATOMIC_LINUX_ZERO_HPP2728#include "orderAccess_linux_zero.hpp"29#include "runtime/os.hpp"3031// Implementation of class atomic3233template<size_t byte_size>34struct Atomic::PlatformAdd {35template<typename D, typename I>36D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;3738template<typename D, typename I>39D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {40return add_and_fetch(dest, add_value, order) - add_value;41}42};4344template<>45template<typename D, typename I>46inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,47atomic_memory_order order) const {48STATIC_ASSERT(4 == sizeof(I));49STATIC_ASSERT(4 == sizeof(D));5051D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);52FULL_MEM_BARRIER;53return 
res;54}5556template<>57template<typename D, typename I>58inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,59atomic_memory_order order) const {60STATIC_ASSERT(8 == sizeof(I));61STATIC_ASSERT(8 == sizeof(D));6263D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);64FULL_MEM_BARRIER;65return res;66}6768template<>69template<typename T>70inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,71T exchange_value,72atomic_memory_order order) const {73STATIC_ASSERT(4 == sizeof(T));74// __sync_lock_test_and_set is a bizarrely named atomic exchange75// operation. Note that some platforms only support this with the76// limitation that the only valid value to store is the immediate77// constant 1. There is a test for this in JNI_CreateJavaVM().78T result = __sync_lock_test_and_set (dest, exchange_value);79// All atomic operations are expected to be full memory barriers80// (see atomic.hpp). However, __sync_lock_test_and_set is not81// a full memory barrier, but an acquire barrier. Hence, this added82// barrier. 
Some platforms (notably ARM) have peculiarities with83// their barrier implementations, delegate it to OrderAccess.84OrderAccess::fence();85return result;86}8788template<>89template<typename T>90inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,91T exchange_value,92atomic_memory_order order) const {93STATIC_ASSERT(8 == sizeof(T));94T result = __sync_lock_test_and_set (dest, exchange_value);95OrderAccess::fence();96return result;97}9899// No direct support for cmpxchg of bytes; emulate using int.100template<>101struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};102103template<>104template<typename T>105inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,106T compare_value,107T exchange_value,108atomic_memory_order order) const {109STATIC_ASSERT(4 == sizeof(T));110111T value = compare_value;112FULL_MEM_BARRIER;113__atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,114__ATOMIC_RELAXED, __ATOMIC_RELAXED);115FULL_MEM_BARRIER;116return value;117}118119template<>120template<typename T>121inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,122T compare_value,123T exchange_value,124atomic_memory_order order) const {125STATIC_ASSERT(8 == sizeof(T));126127FULL_MEM_BARRIER;128T value = compare_value;129__atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,130__ATOMIC_RELAXED, __ATOMIC_RELAXED);131FULL_MEM_BARRIER;132return value;133}134135template<>136template<typename T>137inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {138STATIC_ASSERT(8 == sizeof(T));139volatile int64_t dest;140os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));141return PrimitiveConversions::cast<T>(dest);142}143144template<>145template<typename T>146inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,147T store_value) const {148STATIC_ASSERT(8 == sizeof(T));149os::atomic_copy64(reinterpret_cast<const volatile 
int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));150}151152#endif // OS_CPU_LINUX_ZERO_ATOMIC_LINUX_ZERO_HPP153154155