Path: src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp
/*
 * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP
#define OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP

// Implementation of class atomic
// Note that memory_order_conservative requires a full barrier after atomic stores.
// See https://patchwork.kernel.org/patch/3575821/

template<size_t byte_size>
struct Atomic::PlatformAdd {
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
    D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
    FULL_MEM_BARRIER;
    return res;
  }

  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
    return add_and_fetch(dest, add_value, order) - add_value;
  }
};

template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
                                                     T exchange_value,
                                                     atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
}

template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest,
                                                        T compare_value,
                                                        T exchange_value,
                                                        atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  if (order == memory_order_relaxed) {
    T value = compare_value;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    return value;
  } else {
    T value = compare_value;
    FULL_MEM_BARRIER;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    FULL_MEM_BARRIER;
    return value;
  }
}
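// Added note (not part of the upstream file): the read-modify-write
// operations above implement HotSpot's default memory_order_conservative
// by pairing a relaxed or release __atomic builtin with an explicit
// FULL_MEM_BARRIER, rather than relying on __ATOMIC_SEQ_CST. On AArch64 a
// release store (stlr) is not a two-way fence; a later plain load may be
// reordered ahead of it, hence the full barrier after atomic stores that
// the comment at the top of this file calls for. Note also that
// PlatformAdd and PlatformXchg ignore the `order` argument and always
// take the conservative path.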
template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
  template <typename T>
  T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
};

template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
{
  template <typename T>
  void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
};

template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
};

#endif // OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP
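For illustration, a minimal standalone sketch of the pattern PlatformCmpxchg
uses for orders stronger than memory_order_relaxed: a relaxed GCC/Clang
__atomic_compare_exchange bracketed by full two-way barriers. The
FULL_MEM_BARRIER definition and the cmpxchg_conservative name below are
assumptions made for this sketch (HotSpot supplies its own barrier macro in
the platform OrderAccess code); they are not part of the header above.

#include <cstdint>
#include <cstdio>

// Assumption for this sketch: a full two-way fence. With GCC/Clang,
// __sync_synchronize() compiles to dmb ish on AArch64.
#define FULL_MEM_BARRIER __sync_synchronize()

// Hypothetical helper mirroring the non-relaxed branch of
// PlatformCmpxchg::operator() above: a relaxed CAS made conservative
// by bracketing it with full barriers.
static int64_t cmpxchg_conservative(int64_t volatile* dest,
                                    int64_t compare_value,
                                    int64_t exchange_value) {
  int64_t value = compare_value;
  FULL_MEM_BARRIER;
  __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                            __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  FULL_MEM_BARRIER;
  // On success `value` still holds compare_value; on failure the builtin
  // stored the current contents of *dest into it. Either way it is the
  // old value, matching the contract of Atomic::cmpxchg.
  return value;
}

int main() {
  int64_t x = 41;
  int64_t old = cmpxchg_conservative(&x, 41, 42);
  std::printf("old=%lld new=%lld\n", (long long)old, (long long)x);  // old=41 new=42
  return 0;
}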