Path: src/hotspot/share/runtime/atomic.hpp
/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_ATOMIC_HPP
#define SHARE_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "metaprogramming/removeCV.hpp"
#include "metaprogramming/removePointer.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/align.hpp"
#include "utilities/bytes.hpp"
#include "utilities/macros.hpp"

#include <type_traits>

enum atomic_memory_order {
  // The modes that align with C++11 are intended to
  // follow the same semantics.
  memory_order_relaxed = 0,
  memory_order_acquire = 2,
  memory_order_release = 3,
  memory_order_acq_rel = 4,
  memory_order_seq_cst = 5,
  // Strong two-way memory barrier.
  memory_order_conservative = 8
};

enum ScopedFenceType {
    X_ACQUIRE
  , RELEASE_X
  , RELEASE_X_FENCE
};

class Atomic : AllStatic {
public:
  // Atomic operations on int64 types are not available on all 32-bit
  // platforms. If atomic ops on int64 are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.

  // Atomically store to a location
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename D, typename T>
  inline static void store(volatile D* dest, T store_value);

  template <typename D, typename T>
  inline static void release_store(volatile D* dest, T store_value);

  template <typename D, typename T>
  inline static void release_store_fence(volatile D* dest, T store_value);

  // Atomically load from a location
  // The type T must be either a pointer type, an integral/enum type,
  // or a type that is primitive convertible using PrimitiveConversions.
  template<typename T>
  inline static T load(const volatile T* dest);

  template <typename T>
  inline static T load_acquire(const volatile T* dest);
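
  // A minimal usage sketch (hypothetical caller code, not part of this
  // class): publishing an object with release/acquire pairing so that a
  // reader who observes the pointer also observes its initialized fields.
  //
  //   struct Config { int _x; };
  //   static Config* volatile _config = NULL;
  //
  //   // writer
  //   Config* c = new Config();
  //   c->_x = 42;
  //   Atomic::release_store(&_config, c);  // fields visible before pointer
  //
  //   // reader
  //   Config* cfg = Atomic::load_acquire(&_config);
  //   if (cfg != NULL) { /* guaranteed to see cfg->_x == 42 */ }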

  // Atomically add to a location. *add*() provides:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>

  // Returns updated value.
  template<typename D, typename I>
  inline static D add(D volatile* dest, I add_value,
                      atomic_memory_order order = memory_order_conservative);

  // Returns previous value.
  template<typename D, typename I>
  inline static D fetch_and_add(D volatile* dest, I add_value,
                                atomic_memory_order order = memory_order_conservative);

  template<typename D, typename I>
  inline static D sub(D volatile* dest, I sub_value,
                      atomic_memory_order order = memory_order_conservative);

  // Atomically increment a location. inc() provides:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type. If it is a pointer type, then the increment is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void inc(D volatile* dest,
                         atomic_memory_order order = memory_order_conservative);

  // Atomically decrement a location. dec() provides:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type. If it is a pointer type, then the decrement is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void dec(D volatile* dest,
                         atomic_memory_order order = memory_order_conservative);
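
  // A minimal usage sketch (hypothetical caller code): integral counters
  // and scaled pointer arithmetic.
  //
  //   static volatile int _count = 0;
  //   Atomic::inc(&_count);                         // _count == 1
  //   int now  = Atomic::add(&_count, 5);           // now == 6 (updated value)
  //   int prev = Atomic::fetch_and_add(&_count, 2); // prev == 6, _count == 8
  //
  //   static int* volatile _cursor = ...;
  //   Atomic::inc(&_cursor);  // advances by sizeof(int) bytes, not by 1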

  // Performs atomic exchange of *dest with exchange_value. Returns the
  // prior value of *dest. xchg*() provides:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename D, typename T>
  inline static D xchg(volatile D* dest, T exchange_value,
                       atomic_memory_order order = memory_order_conservative);

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns prior
  // value of *dest. cmpxchg*() provides:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>

  template<typename D, typename U, typename T>
  inline static D cmpxchg(D volatile* dest,
                          U compare_value,
                          T exchange_value,
                          atomic_memory_order order = memory_order_conservative);

  // Performs atomic compare of *dest and NULL, and replaces *dest
  // with value if the comparison succeeded. Returns true if
  // the comparison succeeded and the exchange occurred. This is
  // often used as part of lazy initialization, as a lock-free
  // alternative to the Double-Checked Locking Pattern.
  template<typename D, typename T>
  inline static bool replace_if_null(D* volatile* dest, T* value,
                                     atomic_memory_order order = memory_order_conservative);
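
  // A minimal usage sketch (hypothetical caller code): a cmpxchg retry
  // loop, and lock-free lazy initialization via replace_if_null.
  //
  //   static volatile int _flags = 0;
  //   int old;
  //   do {                                  // set a bit atomically
  //     old = Atomic::load(&_flags);
  //   } while (Atomic::cmpxchg(&_flags, old, old | 0x1) != old);
  //
  //   static Table* volatile _table = NULL;
  //   Table* t = new Table();
  //   if (!Atomic::replace_if_null(&_table, t)) {
  //     delete t;                           // somebody else won the race
  //   }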

private:
WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointerConvertible is declared private
  // Test whether From is implicitly convertible to To.
  // From and To must be pointer types.
  // Note: Provides the limited subset of C++11 std::is_convertible
  // that is needed here.
  template<typename From, typename To> struct IsPointerConvertible;

protected:
  // Dispatch handler for store. Provides type-based validity
  // checking and limited conversions around calls to the platform-
  // specific implementation layer provided by PlatformOp.
  template<typename D, typename T, typename PlatformOp, typename Enable = void>
  struct StoreImpl;

  // Platform-specific implementation of store. Support for sizes
  // of 1, 2, 4, and (if different) pointer size bytes is required.
  // The class is a function object that must be default constructible,
  // with these requirements:
  //
  // either:
  // - dest is of type D*, an integral, enum or pointer type.
  // - new_value is of type T, an integral, enum or pointer type D or
  //   pointer type convertible to D.
  // or:
  // - T and D are the same and are primitive convertible using PrimitiveConversions
  // and either way:
  // - platform_store is an object of type PlatformStore<sizeof(T)>.
  //
  // Then
  //   platform_store(new_value, dest)
  // must be a valid expression.
  //
  // The default implementation is a volatile store. If a platform
  // requires more for e.g. 64 bit stores, a specialization is required.
  template<size_t byte_size> struct PlatformStore;

  // Dispatch handler for load. Provides type-based validity
  // checking and limited conversions around calls to the platform-
  // specific implementation layer provided by PlatformOp.
  template<typename T, typename PlatformOp, typename Enable = void>
  struct LoadImpl;

  // Platform-specific implementation of load. Support for sizes of
  // 1, 2, 4 bytes and (if different) pointer size bytes is required.
  // The class is a function object that must be default
  // constructible, with these requirements:
  //
  // - dest is of type T*, an integral, enum or pointer type, or
  //   T is convertible to a primitive type using PrimitiveConversions
  // - platform_load is an object of type PlatformLoad<sizeof(T)>.
  //
  // Then
  //   platform_load(src)
  // must be a valid expression, returning a result convertible to T.
  //
  // The default implementation is a volatile load. If a platform
  // requires more for e.g. 64 bit loads, a specialization is required.
  template<size_t byte_size> struct PlatformLoad;

  // Give platforms a variation point to specialize.
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;

private:
  // Dispatch handler for add. Provides type-based validity checking
  // and limited conversions around calls to the platform-specific
  // implementation layer provided by PlatformAdd.
  template<typename D, typename I, typename Enable = void>
  struct AddImpl;

  // Platform-specific implementation of add. Support for sizes of 4
  // bytes and (if different) pointer size bytes is required. The
  // class must be default constructible, with these requirements:
  //
  // - dest is of type D*, an integral or pointer type.
  // - add_value is of type I, an integral type.
  // - sizeof(I) == sizeof(D).
  // - if D is an integral type, I == D.
  // - order is of type atomic_memory_order.
  // - platform_add is an object of type PlatformAdd<sizeof(D)>.
  //
  // Then both
  //   platform_add.add_and_fetch(dest, add_value, order)
  //   platform_add.fetch_and_add(dest, add_value, order)
  // must be valid expressions returning a result convertible to D.
  //
  // add_and_fetch atomically adds add_value to the value of dest,
  // returning the new value.
  //
  // fetch_and_add atomically adds add_value to the value of dest,
  // returning the old value.
  //
  // When D is a pointer type P*, both add_and_fetch and fetch_and_add
  // treat it as if it were an uintptr_t; they do not perform any
  // scaling of add_value, as that has already been done by the caller.
  //
  // No definition is provided; all platforms must explicitly define
  // this class and any needed specializations.
  template<size_t byte_size> struct PlatformAdd;
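
  // For illustration only: a platform file could satisfy this contract
  // with compiler intrinsics, roughly along these lines (a sketch
  // assuming a GCC-style toolchain; for brevity it maps every order to
  // __ATOMIC_SEQ_CST; real definitions live in the os_cpu headers):
  //
  //   template<>
  //   struct Atomic::PlatformAdd<4> {
  //     template<typename D, typename I>
  //     D add_and_fetch(D volatile* dest, I add_value,
  //                     atomic_memory_order order) const {
  //       return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
  //     }
  //     template<typename D, typename I>
  //     D fetch_and_add(D volatile* dest, I add_value,
  //                     atomic_memory_order order) const {
  //       return __atomic_fetch_add(dest, add_value, __ATOMIC_SEQ_CST);
  //     }
  //   };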

  // Support for platforms that implement some variants of add using a
  // (typically out of line) non-template helper function. The
  // generic arguments passed to PlatformAdd need to be translated to
  // the appropriate type for the helper function, the helper function
  // invoked on the translated arguments, and the result translated
  // back. Type is the parameter / return type of the helper
  // function. No scaling of add_value is performed when D is a pointer
  // type, so this function can be used to implement the support function
  // required by AddAndFetch.
  template<typename Type, typename Fn, typename D, typename I>
  static D add_using_helper(Fn fn, D volatile* dest, I add_value);

  // Dispatch handler for cmpxchg. Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformCmpxchg.
  template<typename D, typename U, typename T, typename Enable = void>
  struct CmpxchgImpl;

  // Platform-specific implementation of cmpxchg. Support for sizes
  // of 1, 4, and 8 is required. The class is a function object that
  // must be default constructible, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value and compare_value are of type T.
  // - order is of type atomic_memory_order.
  // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
  //
  // Then
  //   platform_cmpxchg(dest, compare_value, exchange_value, order)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T volatile*, T, T, atomic_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformCmpxchg;
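
  // For illustration only: a sketch of how a platform might define the
  // operator for 4-byte values with a GCC-style builtin (for brevity it
  // maps every order to __ATOMIC_SEQ_CST; real definitions come from
  // the os_cpu headers):
  //
  //   template<>
  //   template<typename T>
  //   inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
  //                                                   T compare_value,
  //                                                   T exchange_value,
  //                                                   atomic_memory_order order) const {
  //     T expected = compare_value;
  //     __atomic_compare_exchange_n(dest, &expected, exchange_value,
  //                                 false /*weak*/,
  //                                 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  //     return expected;  // old value; equals compare_value on success
  //   }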

  // Support for platforms that implement some variants of cmpxchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformCmpxchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back. Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T cmpxchg_using_helper(Fn fn,
                                T volatile* dest,
                                T compare_value,
                                T exchange_value);

  // Support platforms that do not provide Read-Modify-Write
  // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
  // this class.
public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
  struct CmpxchgByteUsingInt;
private:

  // Dispatch handler for xchg. Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformXchg.
  template<typename D, typename T, typename Enable = void>
  struct XchgImpl;

  // Platform-specific implementation of xchg. Support for sizes
  // of 4 and sizeof(intptr_t) is required. The class is a function
  // object that must be default constructible, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value is of type T.
  // - platform_xchg is an object of type PlatformXchg<sizeof(T)>.
  //
  // Then
  //   platform_xchg(dest, exchange_value)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T volatile*, T, atomic_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformXchg;

  // Support for platforms that implement some variants of xchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformXchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back. Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T xchg_using_helper(Fn fn,
                             T volatile* dest,
                             T exchange_value);
};

template<typename From, typename To>
struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
  // Determine whether From* is implicitly convertible to To*, using
  // the "sizeof trick".
  typedef char yes;
  typedef char (&no)[2];

  static yes test(To*);
  static no test(...);
  static From* test_value;

  static const bool value = (sizeof(yes) == sizeof(test(test_value)));
};
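
// For example (hypothetical types): overload resolution selects test(To*)
// only when the pointer conversion exists, and the two overloads have
// return types of different sizes, so
//
//   struct Base {};
//   struct Derived : public Base {};
//   IsPointerConvertible<Derived*, Base*>::value   // true
//   IsPointerConvertible<Base*, Derived*>::value   // false (matches ...)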

// Handle load for pointer, integral and enum types.
template<typename T, typename PlatformOp>
struct Atomic::LoadImpl<
  T,
  PlatformOp,
  typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value || IsPointer<T>::value>::type>
{
  T operator()(T const volatile* dest) const {
    // Forward to the platform handler for the size of T.
    return PlatformOp()(dest);
  }
};

// Handle load for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T, typename PlatformOp>
struct Atomic::LoadImpl<
  T,
  PlatformOp,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T const volatile* dest) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    Decayed result = PlatformOp()(reinterpret_cast<Decayed const volatile*>(dest));
    return Translator::recover(result);
  }
};
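
// For example, a hypothetical single-word wrapper type could opt in to
// the translator protocol along these lines (names follow the protocol
// used above: Decayed, decay(), recover()):
//
//   class Ticket {
//     uintptr_t _bits;
//    public:
//     explicit Ticket(uintptr_t bits) : _bits(bits) {}
//     uintptr_t bits() const { return _bits; }
//   };
//
//   template<>
//   struct PrimitiveConversions::Translate<Ticket> : public TrueType {
//     typedef Ticket Value;
//     typedef uintptr_t Decayed;
//     static Decayed decay(Value x) { return x.bits(); }
//     static Value recover(Decayed x) { return Ticket(x); }
//   };
//
// Atomic::load/store/cmpxchg on a Ticket then forward to the uintptr_t
// platform handlers of the same size.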

// Default implementation of atomic load if a specific platform
// does not provide a specialization for a certain size class.
// For increased safety, the default implementation only allows
// load types that are pointer sized or smaller. If a platform still
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformLoad for that wider size class.
template<size_t byte_size>
struct Atomic::PlatformLoad {
  template<typename T>
  T operator()(T const volatile* dest) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    return *dest;
  }
};

// Handle store for integral and enum types.
//
// All the involved types must be identical.
template<typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  T, T,
  PlatformOp,
  typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value>::type>
{
  void operator()(T volatile* dest, T new_value) const {
    // Forward to the platform handler for the size of T.
    PlatformOp()(dest, new_value);
  }
};

// Handle store for pointer types.
//
// The new_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// new_value in the destination.
template<typename D, typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  D*, T*,
  PlatformOp,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
{
  void operator()(D* volatile* dest, T* new_value) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* value = new_value;
    PlatformOp()(dest, value);
  }
};

// Handle store for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments.
template<typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  T, T,
  PlatformOp,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  void operator()(T volatile* dest, T new_value) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    PlatformOp()(reinterpret_cast<Decayed volatile*>(dest),
                 Translator::decay(new_value));
  }
};

// Default implementation of atomic store if a specific platform
// does not provide a specialization for a certain size class.
// For increased safety, the default implementation only allows
// storing types that are pointer sized or smaller. If a platform still
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformStore for that wider size class.
template<size_t byte_size>
struct Atomic::PlatformStore {
  template<typename T>
  void operator()(T volatile* dest,
                  T new_value) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    (void)const_cast<T&>(*dest = new_value);
  }
};

template<typename D>
inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  Atomic::add(dest, I(1), order);
}

template<typename D>
inline void Atomic::dec(D volatile* dest, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  // Assumes two's complement integer representation.
  #pragma warning(suppress: 4146)
  Atomic::add(dest, I(-1), order);
}

template<typename D, typename I>
inline D Atomic::sub(D volatile* dest, I sub_value, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  STATIC_ASSERT(IsIntegral<I>::value);
  // If D is a pointer type, use [u]intptr_t as the addend type,
  // matching signedness of I. Otherwise, use D as the addend type.
  typedef typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type PI;
  typedef typename Conditional<IsPointer<D>::value, PI, D>::type AddendType;
  // Only allow conversions that can't change the value.
  STATIC_ASSERT(IsSigned<I>::value == IsSigned<AddendType>::value);
  STATIC_ASSERT(sizeof(I) <= sizeof(AddendType));
  AddendType addend = sub_value;
  // Assumes two's complement integer representation.
  #pragma warning(suppress: 4146) // In case AddendType is not signed.
  return Atomic::add(dest, -addend, order);
}

// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of specializations
// of the operator template is provided, nor are there any generic
// specializations of the class. The platform file is responsible for
// providing those.
template<size_t byte_size>
struct Atomic::PlatformCmpxchg {
  template<typename T>
  T operator()(T volatile* dest,
               T compare_value,
               T exchange_value,
               atomic_memory_order order) const;
};

// Define the class before including platform file, which may use this
// as a base class, requiring it be complete. The definition is later
// in this file, near the other definitions related to cmpxchg.
struct Atomic::CmpxchgByteUsingInt {
  static uint8_t get_byte_in_int(uint32_t n, uint32_t idx);
  static uint32_t set_byte_in_int(uint32_t n, uint8_t b, uint32_t idx);
  template<typename T>
  T operator()(T volatile* dest,
               T compare_value,
               T exchange_value,
               atomic_memory_order order) const;
};

// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of specializations
// of the operator template is provided, nor are there any generic
// specializations of the class. The platform file is responsible for
// providing those.
template<size_t byte_size>
struct Atomic::PlatformXchg {
  template<typename T>
  T operator()(T volatile* dest,
               T exchange_value,
               atomic_memory_order order) const;
};
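
// For illustration only: a platform might define the xchg operator with
// a GCC-style builtin (a sketch that maps every order to
// __ATOMIC_SEQ_CST; real definitions come from the os_cpu header
// included below):
//
//   template<>
//   template<typename T>
//   inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
//                                                T exchange_value,
//                                                atomic_memory_order order) const {
//     return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
//   }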

template <ScopedFenceType T>
class ScopedFenceGeneral: public StackObj {
 public:
  void prefix() {}
  void postfix() {}
};

// The following methods can be specialized using simple template specialization
// in the platform specific files for optimization purposes. Otherwise the
// generalized variant is used.

template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix()       { OrderAccess::acquire(); }
template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix()        { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix()  { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }

template <ScopedFenceType T>
class ScopedFence : public ScopedFenceGeneral<T> {
  void *const _field;
 public:
  ScopedFence(void *const field) : _field(field) { prefix(); }
  ~ScopedFence() { postfix(); }
  void prefix() { ScopedFenceGeneral<T>::prefix(); }
  void postfix() { ScopedFenceGeneral<T>::postfix(); }
};

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

// size_t casts...
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

template<typename T>
inline T Atomic::load(const volatile T* dest) {
  return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
}

template<size_t byte_size, ScopedFenceType type>
struct Atomic::PlatformOrderedLoad {
  template <typename T>
  T operator()(const volatile T* p) const {
    ScopedFence<type> f((void*)p);
    return Atomic::load(p);
  }
};

template <typename T>
inline T Atomic::load_acquire(const volatile T* p) {
  return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
}

template<typename D, typename T>
inline void Atomic::store(volatile D* dest, T store_value) {
  StoreImpl<D, T, PlatformStore<sizeof(D)> >()(dest, store_value);
}

template<size_t byte_size, ScopedFenceType type>
struct Atomic::PlatformOrderedStore {
  template <typename T>
  void operator()(volatile T* p, T v) const {
    ScopedFence<type> f((void*)p);
    Atomic::store(p, v);
  }
};

template <typename D, typename T>
inline void Atomic::release_store(volatile D* p, T v) {
  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(p, v);
}

template <typename D, typename T>
inline void Atomic::release_store_fence(volatile D* p, T v) {
  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v);
}

template<typename D, typename I>
inline D Atomic::add(D volatile* dest, I add_value,
                     atomic_memory_order order) {
  return AddImpl<D, I>::add_and_fetch(dest, add_value, order);
}

template<typename D, typename I>
inline D Atomic::fetch_and_add(D volatile* dest, I add_value,
                               atomic_memory_order order) {
  return AddImpl<D, I>::fetch_and_add(dest, add_value, order);
}

template<typename D, typename I>
struct Atomic::AddImpl<
  D, I,
  typename EnableIf<IsIntegral<I>::value &&
                    IsIntegral<D>::value &&
                    (sizeof(I) <= sizeof(D)) &&
                    (IsSigned<I>::value == IsSigned<D>::value)>::type>
{
  static D add_and_fetch(D volatile* dest, I add_value,
                         atomic_memory_order order) {
    D addend = add_value;
    return PlatformAdd<sizeof(D)>().add_and_fetch(dest, addend, order);
  }
  static D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) {
    D addend = add_value;
    return PlatformAdd<sizeof(D)>().fetch_and_add(dest, addend, order);
  }
};

template<typename P, typename I>
struct Atomic::AddImpl<
  P*, I,
  typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
{
  STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
  STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
  typedef typename Conditional<IsSigned<I>::value,
                               intptr_t,
                               uintptr_t>::type CI;

  static CI scale_addend(CI add_value) {
    return add_value * sizeof(P);
  }

  static P* add_and_fetch(P* volatile* dest, I add_value, atomic_memory_order order) {
    CI addend = add_value;
    return PlatformAdd<sizeof(P*)>().add_and_fetch(dest, scale_addend(addend), order);
  }
  static P* fetch_and_add(P* volatile* dest, I add_value, atomic_memory_order order) {
    CI addend = add_value;
    return PlatformAdd<sizeof(P*)>().fetch_and_add(dest, scale_addend(addend), order);
  }
};
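
// Worked example of the scaling above (hypothetical values): for an
// int* destination on a platform with 4-byte int,
//   Atomic::add(&_p, 3)
// deduces P = int, I = int, computes scale_addend(3) == 3 * sizeof(int)
// == 12, and passes 12 to PlatformAdd<sizeof(int*)>, so the stored
// pointer advances by three elements.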

template<typename Type, typename Fn, typename D, typename I>
inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
  return PrimitiveConversions::cast<D>(
    fn(PrimitiveConversions::cast<Type>(add_value),
       reinterpret_cast<Type volatile*>(dest)));
}

template<typename D, typename U, typename T>
inline D Atomic::cmpxchg(D volatile* dest,
                         U compare_value,
                         T exchange_value,
                         atomic_memory_order order) {
  return CmpxchgImpl<D, U, T>()(dest, compare_value, exchange_value, order);
}

template<typename D, typename T>
inline bool Atomic::replace_if_null(D* volatile* dest, T* value,
                                    atomic_memory_order order) {
  // Presently using a trivial implementation in terms of cmpxchg.
  // Consider adding platform support, to permit the use of compiler
  // intrinsics like gcc's __sync_bool_compare_and_swap.
  D* expected_null = NULL;
  return expected_null == cmpxchg(dest, expected_null, value, order);
}

// Handle cmpxchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value>::type>
{
  T operator()(T volatile* dest, T compare_value, T exchange_value,
               atomic_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformCmpxchg<sizeof(T)>()(dest,
                                        compare_value,
                                        exchange_value,
                                        order);
  }
};

// Handle cmpxchg for pointer types.
//
// The destination's type and the compare_value type must be the same,
// ignoring cv-qualifiers; we don't care about the cv-qualifiers of
// the compare_value.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename D, typename U, typename T>
struct Atomic::CmpxchgImpl<
  D*, U*, T*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
                    IsSame<typename RemoveCV<D>::type,
                           typename RemoveCV<U>::type>::value>::type>
{
  D* operator()(D* volatile* dest, U* compare_value, T* exchange_value,
                atomic_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    // Don't care what the CV qualifiers for compare_value are,
    // but we need to match D* when calling platform support.
    D* old_value = const_cast<D*>(compare_value);
    return PlatformCmpxchg<sizeof(D*)>()(dest, old_value, new_value, order);
  }
};
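
// For example (hypothetical types), the specialization above accepts a
// cv-qualified compare_value and a derived exchange_value:
//
//   struct Node { ... };
//   struct MarkedNode : public Node { ... };
//   static Node* volatile _head = ...;
//
//   const Node* expected = ...;     // U = const Node; RemoveCV -> Node
//   MarkedNode* replacement = ...;  // T* convertible to D*
//   Atomic::cmpxchg(&_head, expected, replacement);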

// Handle cmpxchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T volatile* dest, T compare_value, T exchange_value,
               atomic_memory_order order) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      cmpxchg(reinterpret_cast<Decayed volatile*>(dest),
              Translator::decay(compare_value),
              Translator::decay(exchange_value),
              order));
  }
};

template<typename Type, typename Fn, typename T>
inline T Atomic::cmpxchg_using_helper(Fn fn,
                                      T volatile* dest,
                                      T compare_value,
                                      T exchange_value) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest),
       PrimitiveConversions::cast<Type>(compare_value)));
}

inline uint32_t Atomic::CmpxchgByteUsingInt::set_byte_in_int(uint32_t n,
                                                             uint8_t b,
                                                             uint32_t idx) {
  int bitsIdx = BitsPerByte * idx;
  return (n & ~(static_cast<uint32_t>(0xff) << bitsIdx))
      | (static_cast<uint32_t>(b) << bitsIdx);
}

inline uint8_t Atomic::CmpxchgByteUsingInt::get_byte_in_int(uint32_t n,
                                                            uint32_t idx) {
  int bitsIdx = BitsPerByte * idx;
  return (uint8_t)(n >> bitsIdx);
}
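
// Worked example of the helpers above (hypothetical values): with
// n == 0x11223344 and idx == 1, bitsIdx == 8, so
//   set_byte_in_int(n, 0xAB, 1) == 0x1122AB44
//   get_byte_in_int(n, 1)       == 0x33
// i.e. idx selects the byte at bit position 8*idx within the word.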

template<typename T>
inline T Atomic::CmpxchgByteUsingInt::operator()(T volatile* dest,
                                                 T compare_value,
                                                 T exchange_value,
                                                 atomic_memory_order order) const {
  STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
  uint8_t canon_exchange_value = exchange_value;
  uint8_t canon_compare_value = compare_value;
  volatile uint32_t* aligned_dest
    = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
  size_t offset = pointer_delta(dest, aligned_dest, 1);

  uint32_t idx = (Endian::NATIVE == Endian::BIG)
                 ? (sizeof(uint32_t) - 1 - offset)
                 : offset;

  // current value may not be what we are looking for, so force it
  // to that value so the initial cmpxchg will fail if it is different
  uint32_t cur = set_byte_in_int(Atomic::load(aligned_dest), canon_compare_value, idx);

  // always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure
  do {
    // value to swap in matches current value
    // except for the one byte we want to update
    uint32_t new_value = set_byte_in_int(cur, canon_exchange_value, idx);

    uint32_t res = cmpxchg(aligned_dest, cur, new_value, order);
    if (res == cur) break;      // success

    // at least one byte in the int changed value, so update
    // our view of the current int
    cur = res;
    // if our byte still matches the compare value, loop and try again
  } while (get_byte_in_int(cur, idx) == canon_compare_value);

  return PrimitiveConversions::cast<T>(get_byte_in_int(cur, idx));
}

// Handle xchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value>::type>
{
  T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformXchg<sizeof(T)>()(dest, exchange_value, order);
  }
};

// Handle xchg for pointer types.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename D, typename T>
struct Atomic::XchgImpl<
  D*, T*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
{
  D* operator()(D* volatile* dest, T* exchange_value, atomic_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    return PlatformXchg<sizeof(D*)>()(dest, new_value, order);
  }
};

// Handle xchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      xchg(reinterpret_cast<Decayed volatile*>(dest),
           Translator::decay(exchange_value),
           order));
  }
};

template<typename Type, typename Fn, typename T>
inline T Atomic::xchg_using_helper(Fn fn,
                                   T volatile* dest,
                                   T exchange_value) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  // Notice the swapped order of arguments. Change when/if stubs are rewritten.
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest)));
}

template<typename D, typename T>
inline D Atomic::xchg(volatile D* dest, T exchange_value, atomic_memory_order order) {
  return XchgImpl<D, T>()(dest, exchange_value, order);
}

#endif // SHARE_RUNTIME_ATOMIC_HPP