Path: blob/master/src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp
/*
 * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP
#define OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP

#include "utilities/debug.hpp"

// Implementation of class atomic
// Note that memory_order_conservative requires a full barrier after atomic stores.
// See https://patchwork.kernel.org/patch/3575821/

template<size_t byte_size>
struct Atomic::PlatformAdd {
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
    D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
    FULL_MEM_BARRIER;
    return res;
  }

  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
    return add_and_fetch(dest, add_value, order) - add_value;
  }
};

template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
                                                     T exchange_value,
                                                     atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
}

template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest,
                                                        T compare_value,
                                                        T exchange_value,
                                                        atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  if (order == memory_order_conservative) {
    T value = compare_value;
    FULL_MEM_BARRIER;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    FULL_MEM_BARRIER;
    return value;
  } else {
    STATIC_ASSERT (
      // The modes that align with C++11 are intended to
      // follow the same semantics.
      memory_order_relaxed == __ATOMIC_RELAXED &&
      memory_order_acquire == __ATOMIC_ACQUIRE &&
      memory_order_release == __ATOMIC_RELEASE &&
      memory_order_acq_rel == __ATOMIC_ACQ_REL &&
      memory_order_seq_cst == __ATOMIC_SEQ_CST);

    // Some sanity checking on the memory order. It makes no
    // sense to have a release operation for a store that never
    // happens.
    int failure_memory_order;
    switch (order) {
      case memory_order_release:
        failure_memory_order = memory_order_relaxed; break;
      case memory_order_acq_rel:
        failure_memory_order = memory_order_acquire; break;
      default:
        failure_memory_order = order;
    }
    assert(failure_memory_order <= order, "must be");

    T value = compare_value;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              order, failure_memory_order);
    return value;
  }
}

template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
  template <typename T>
  T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
};

template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
{
  template <typename T>
  void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
};

template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
};


#endif // OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP
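
The memory_order_conservative path above brackets a relaxed compare-exchange with two full barriers, per the patchwork link in the header comment. The following is a minimal standalone sketch of that same pattern, not part of the JDK sources: it assumes FULL_MEM_BARRIER expands to __sync_synchronize() (as in the Linux AArch64 port), and the function name conservative_cmpxchg is hypothetical.

// Standalone sketch (not JDK code) of the "conservative" cmpxchg pattern:
// a relaxed __atomic_compare_exchange bracketed by full barriers.
// Builds with g++/clang++ on any target providing the __atomic builtins.
#include <cstdio>
#include <cstdint>

template<typename T>
T conservative_cmpxchg(T volatile* dest, T compare_value, T exchange_value) {
  T value = compare_value;
  __sync_synchronize();              // stands in for FULL_MEM_BARRIER (assumption)
  __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                            __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  __sync_synchronize();              // full barrier after the read-modify-write
  return value;                      // the old value, as HotSpot's cmpxchg returns
}

int main() {
  volatile int64_t counter = 41;
  int64_t old = conservative_cmpxchg(&counter, (int64_t)41, (int64_t)42);
  printf("old=%lld new=%lld\n", (long long)old, (long long)counter);
  return 0;
}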
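
The PlatformOrderedLoad and PlatformOrderedStore specializations map directly onto the generic __atomic_load and __atomic_store builtins with acquire and release ordering. Below is a minimal standalone sketch of the publish/consume idiom those orderings support; it is illustrative only, and the names publish, consume, payload, and ready are hypothetical.

// Standalone sketch (not JDK code) of acquire-load / release-store pairing.
// The release store publishes 'payload' before 'ready'; the acquire load
// orders the read of 'ready' before the subsequent read of 'payload'.
#include <cstdio>

static int payload;                  // data published by the writer
static int ready;                    // flag: release-written, acquire-read

void publish(int v) {
  payload = v;                       // plain store, ordered by the release below
  int one = 1;
  __atomic_store(&ready, &one, __ATOMIC_RELEASE);
}

bool consume(int* out) {
  int flag;
  __atomic_load(&ready, &flag, __ATOMIC_ACQUIRE);
  if (!flag) return false;
  *out = payload;                    // safe: acquire orders this after the flag read
  return true;
}

int main() {
  publish(7);
  int v;
  if (consume(&v)) printf("got %d\n", v);
  return 0;
}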