Path: src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp
/*
 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_ZERO_ATOMIC_BSD_ZERO_HPP
#define OS_CPU_BSD_ZERO_ATOMIC_BSD_ZERO_HPP

#include "orderAccess_bsd_zero.hpp"
#include "runtime/os.hpp"

// Implementation of class atomic

#ifdef M68K

/*
 * __m68k_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
 * Returns newval on success and oldval if no exchange happened.
 * This implementation is processor specific and works on
 * 68020, 68030, 68040 and 68060.
 *
 * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
 * instruction.
 * Using a kernel helper would be better for an arch-complete implementation.
 *
 */

static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  int ret;
  __asm __volatile ("cas%.l %0,%2,%1"
                    : "=d" (ret), "+m" (*(ptr))
                    : "d" (newval), "0" (oldval));
  return ret;
}

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation. */
static inline int m68k_compare_and_swap(int newval,
                                        volatile int *ptr,
                                        int oldval) {
  for (;;) {
    int prev = *ptr;
    if (prev != oldval)
      return prev;

    if (__m68k_cmpxchg(prev, newval, ptr) == newval)
      // Success.
      return prev;

    // We failed even though prev == oldval.  Try again.
  }
}

/* Atomically add an int to memory. */
static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
  for (;;) {
    // Loop until success.
    int prev = *ptr;

    if (__m68k_cmpxchg(prev, prev + add_value, ptr) == prev + add_value)
      return prev + add_value;
  }
}

/* Atomically write NEWVAL into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int m68k_lock_test_and_set(int newval, volatile int *ptr) {
  for (;;) {
    // Loop until success.
    int prev = *ptr;

    if (__m68k_cmpxchg(prev, newval, ptr) == prev)
      return prev;
  }
}
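// Illustrative sketch (not part of the original header): a minimal example of
// how the documented contract of m68k_compare_and_swap() above -- it returns
// the contents of `*PTR' before the operation -- is consumed in a typical CAS
// retry loop.  The function and parameter names below are hypothetical, and
// the block is deliberately kept out of compilation with #if 0.
#if 0
static inline int example_bounded_increment(volatile int *counter, int limit) {
  for (;;) {
    int old = *counter;
    if (old >= limit)
      return old;        // Already at the limit; nothing to do.
    if (m68k_compare_and_swap(old + 1, counter, old) == old)
      return old + 1;    // Returned value matched the expected old value, so our update landed.
    // Another thread changed *counter between the read and the CAS; retry.
  }
}
#endif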
#endif // M68K

#ifdef ARM

/*
 * __kernel_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 */

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation. */
static inline int arm_compare_and_swap(int newval,
                                       volatile int *ptr,
                                       int oldval) {
  for (;;) {
    int prev = *ptr;
    if (prev != oldval)
      return prev;

    if (__kernel_cmpxchg(prev, newval, ptr) == 0)
      // Success.
      return prev;

    // We failed even though prev == oldval.  Try again.
  }
}

/* Atomically add an int to memory. */
static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
  for (;;) {
    // Loop until a __kernel_cmpxchg succeeds.
    int prev = *ptr;

    if (__kernel_cmpxchg(prev, prev + add_value, ptr) == 0)
      return prev + add_value;
  }
}

/* Atomically write NEWVAL into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
  for (;;) {
    // Loop until a __kernel_cmpxchg succeeds.
    int prev = *ptr;

    if (__kernel_cmpxchg(prev, newval, ptr) == 0)
      return prev;
  }
}
#endif // ARM

template<size_t byte_size>
struct Atomic::PlatformAdd {
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;

  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
    return add_and_fetch(dest, add_value, order) - add_value;
  }
};

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));

#ifdef ARM
  return add_using_helper<int>(arm_add_and_fetch, dest, add_value);
#else
#ifdef M68K
  return add_using_helper<int>(m68k_add_and_fetch, dest, add_value);
#else
  D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
#endif // M68K
#endif // ARM
}

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));

  D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
  return xchg_using_helper<int>(arm_lock_test_and_set, dest, exchange_value);
#else
#ifdef M68K
  return xchg_using_helper<int>(m68k_lock_test_and_set, dest, exchange_value);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation.  Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1.  There is a test for this in JNI_CreateJavaVM().
  T result = __sync_lock_test_and_set(dest, exchange_value);
  // All atomic operations are expected to be full memory barriers
  // (see atomic.hpp).  However, __sync_lock_test_and_set is not
  // a full memory barrier, but an acquire barrier; hence the added
  // barrier below.  Some platforms (notably ARM) have peculiarities in
  // their barrier implementations, so this is delegated to OrderAccess.
  OrderAccess::fence();
  return result;
#endif // M68K
#endif // ARM
}
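// Illustrative sketch (not part of the original header): the exchange-plus-fence
// pattern used in PlatformXchg above, written out against a plain int.
// __sync_lock_test_and_set() only provides an acquire barrier, so a full barrier
// is issued afterwards to give the exchange the full-fence semantics atomic.hpp
// expects; __sync_synchronize() stands in here for OrderAccess::fence().  The
// function name is hypothetical and the block is excluded from compilation.
#if 0
static inline int example_full_fence_exchange(volatile int *slot, int new_value) {
  int old_value = __sync_lock_test_and_set(slot, new_value);  // acquire barrier only
  __sync_synchronize();                                       // upgrade to a full barrier
  return old_value;
}
#endif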
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  T result = __sync_lock_test_and_set(dest, exchange_value);
  OrderAccess::fence();
  return result;
}

// No direct support for cmpxchg of bytes; emulate using int.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
  return cmpxchg_using_helper<int>(arm_compare_and_swap, dest, compare_value, exchange_value);
#else
#ifdef M68K
  return cmpxchg_using_helper<int>(m68k_compare_and_swap, dest, compare_value, exchange_value);
#else
  T value = compare_value;
  FULL_MEM_BARRIER;
  __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                            __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  FULL_MEM_BARRIER;
  return value;
#endif // M68K
#endif // ARM
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));

  T value = compare_value;
  FULL_MEM_BARRIER;
  __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                            __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  FULL_MEM_BARRIER;
  return value;
}

template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile int64_t dest;
  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src),
                    reinterpret_cast<volatile int64_t*>(&dest));
  return PrimitiveConversions::cast<T>(dest);
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
                                                 T store_value) const {
  STATIC_ASSERT(8 == sizeof(T));
  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value),
                    reinterpret_cast<volatile int64_t*>(dest));
}

#endif // OS_CPU_BSD_ZERO_ATOMIC_BSD_ZERO_HPP