Path: src/hotspot/share/runtime/atomic.hpp
/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_ATOMIC_HPP
#define SHARE_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "metaprogramming/removeCV.hpp"
#include "metaprogramming/removePointer.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/align.hpp"
#include "utilities/bytes.hpp"
#include "utilities/macros.hpp"
#include <type_traits>

enum atomic_memory_order {
  // The modes that align with C++11 are intended to
  // follow the same semantics.
  memory_order_relaxed = 0,
  memory_order_acquire = 2,
  memory_order_release = 3,
  memory_order_acq_rel = 4,
  // Strong two-way memory barrier.
  memory_order_conservative = 8
};

enum ScopedFenceType {
    X_ACQUIRE
  , RELEASE_X
  , RELEASE_X_FENCE
};

class Atomic : AllStatic {
public:
  // Atomic operations on int64 types are not available on all 32-bit
  // platforms. If atomic ops on int64 are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.
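
  // Example (illustrative only; _events and its accessors are hypothetical,
  // not part of this header): a statistics counter bumped from multiple
  // threads, using the default conservative ordering for the update and a
  // plain atomic load for readers. On 32-bit platforms the int64 caveat
  // above applies.
  //
  //   static volatile uint64_t _events;
  //
  //   void record_event() {
  //     Atomic::inc(&_events);         // full two-way barrier by default
  //   }
  //
  //   uint64_t events_snapshot() {
  //     return Atomic::load(&_events); // relaxed atomic read
  //   }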

  // Atomically store to a location
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename D, typename T>
  inline static void store(volatile D* dest, T store_value);

  template <typename D, typename T>
  inline static void release_store(volatile D* dest, T store_value);

  template <typename D, typename T>
  inline static void release_store_fence(volatile D* dest, T store_value);

  // Atomically load from a location
  // The type T must be either a pointer type, an integral/enum type,
  // or a type that is primitive convertible using PrimitiveConversions.
  template<typename T>
  inline static T load(const volatile T* dest);

  template <typename T>
  inline static T load_acquire(const volatile T* dest);

  // Atomically add to a location. The add*() functions provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>

  // Returns updated value.
  template<typename D, typename I>
  inline static D add(D volatile* dest, I add_value,
                      atomic_memory_order order = memory_order_conservative);

  // Returns previous value.
  template<typename D, typename I>
  inline static D fetch_and_add(D volatile* dest, I add_value,
                                atomic_memory_order order = memory_order_conservative);

  template<typename D, typename I>
  inline static D sub(D volatile* dest, I sub_value,
                      atomic_memory_order order = memory_order_conservative);

  // Atomically increment a location. inc() provides:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type. If it is a pointer type, then the increment is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void inc(D volatile* dest,
                         atomic_memory_order order = memory_order_conservative);

  // Atomically decrement a location. dec() provides:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type. If it is a pointer type, then the decrement is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void dec(D volatile* dest,
                         atomic_memory_order order = memory_order_conservative);

  // Performs atomic exchange of *dest with exchange_value. Returns the
  // prior value of *dest. xchg*() provides:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename D, typename T>
  inline static D xchg(volatile D* dest, T exchange_value,
                       atomic_memory_order order = memory_order_conservative);

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns the
  // prior value of *dest. cmpxchg*() provides:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>

  template<typename D, typename U, typename T>
  inline static D cmpxchg(D volatile* dest,
                          U compare_value,
                          T exchange_value,
                          atomic_memory_order order = memory_order_conservative);

  // Performs atomic compare of *dest and NULL, and replaces *dest
  // with exchange_value if the comparison succeeded. Returns true if
  // the comparison succeeded and the exchange occurred. This is
  // often used as part of lazy initialization, as a lock-free
  // alternative to the Double-Checked Locking Pattern.
  template<typename D, typename T>
  inline static bool replace_if_null(D* volatile* dest, T* value,
                                     atomic_memory_order order = memory_order_conservative);
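
  // Example (illustrative; Foo, _cached_foo and get_foo are hypothetical):
  // the lazy-initialization pattern mentioned above, built on
  // replace_if_null().
  //
  //   static Foo* volatile _cached_foo;
  //
  //   Foo* get_foo() {
  //     Foo* foo = Atomic::load_acquire(&_cached_foo);
  //     if (foo == NULL) {
  //       foo = new Foo();
  //       if (!Atomic::replace_if_null(&_cached_foo, foo)) {
  //         delete foo;                               // lost the race
  //         foo = Atomic::load_acquire(&_cached_foo); // use the winner's Foo
  //       }
  //     }
  //     return foo;
  //   }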

private:
WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointerConvertible is declared private
  // Test whether From is implicitly convertible to To.
  // From and To must be pointer types.
  // Note: Provides the limited subset of C++11 std::is_convertible
  // that is needed here.
  template<typename From, typename To> struct IsPointerConvertible;

protected:
  // Dispatch handler for store. Provides type-based validity
  // checking and limited conversions around calls to the platform-
  // specific implementation layer provided by PlatformOp.
  template<typename D, typename T, typename PlatformOp, typename Enable = void>
  struct StoreImpl;

  // Platform-specific implementation of store. Support for sizes
  // of 1, 2, 4, and (if different) pointer-size bytes is required.
  // The class is a function object that must be default constructable,
  // with these requirements:
  //
  // either:
  // - dest is of type D*, an integral, enum or pointer type.
  // - new_value is of type T, an integral, enum or pointer type D or
  //   pointer type convertible to D.
  // or:
  // - T and D are the same and are primitive convertible using PrimitiveConversions
  // and either way:
  // - platform_store is an object of type PlatformStore<sizeof(T)>.
  //
  // Then
  //   platform_store(new_value, dest)
  // must be a valid expression.
  //
  // The default implementation is a volatile store. If a platform
  // requires more, e.g. for 64 bit stores, a specialization is required.
  template<size_t byte_size> struct PlatformStore;

  // Dispatch handler for load. Provides type-based validity
  // checking and limited conversions around calls to the platform-
  // specific implementation layer provided by PlatformOp.
  template<typename T, typename PlatformOp, typename Enable = void>
  struct LoadImpl;

  // Platform-specific implementation of load. Support for sizes of
  // 1, 2, 4 bytes and (if different) pointer-size bytes is required.
  // The class is a function object that must be default
  // constructable, with these requirements:
  //
  // - dest is of type T*, an integral, enum or pointer type, or
  //   T is convertible to a primitive type using PrimitiveConversions
  // - platform_load is an object of type PlatformLoad<sizeof(T)>.
  //
  // Then
  //   platform_load(src)
  // must be a valid expression, returning a result convertible to T.
  //
  // The default implementation is a volatile load. If a platform
  // requires more, e.g. for 64 bit loads, a specialization is required.
  template<size_t byte_size> struct PlatformLoad;

  // Give platforms a variation point to specialize.
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;
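
  // Sketch (hypothetical, for exposition; no particular platform is quoted
  // here): a 32-bit platform whose plain 64-bit stores are not atomic could
  // satisfy the PlatformStore contract above with a specialization along
  // the lines of:
  //
  //   template<>
  //   struct Atomic::PlatformStore<8> {
  //     template<typename T>
  //     void operator()(T volatile* dest, T new_value) const {
  //       STATIC_ASSERT(8 == sizeof(T));
  //       // forward to an out-of-line, CPU-specific atomic 64-bit store
  //     }
  //   };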

private:
  // Dispatch handler for add. Provides type-based validity checking
  // and limited conversions around calls to the platform-specific
  // implementation layer provided by PlatformAdd.
  template<typename D, typename I, typename Enable = void>
  struct AddImpl;

  // Platform-specific implementation of add. Support for sizes of 4
  // bytes and (if different) pointer-size bytes is required. The
  // class must be default constructable, with these requirements:
  //
  // - dest is of type D*, an integral or pointer type.
  // - add_value is of type I, an integral type.
  // - sizeof(I) == sizeof(D).
  // - if D is an integral type, I == D.
  // - order is of type atomic_memory_order.
  // - platform_add is an object of type PlatformAdd<sizeof(D)>.
  //
  // Then both
  //   platform_add.add_and_fetch(dest, add_value, order)
  //   platform_add.fetch_and_add(dest, add_value, order)
  // must be valid expressions returning a result convertible to D.
  //
  // add_and_fetch atomically adds add_value to the value of dest,
  // returning the new value.
  //
  // fetch_and_add atomically adds add_value to the value of dest,
  // returning the old value.
  //
  // When D is a pointer type P*, both add_and_fetch and fetch_and_add
  // treat it as if it were an uintptr_t; they do not perform any
  // scaling of add_value, as that has already been done by the caller.
  //
  // No definition is provided; all platforms must explicitly define
  // this class and any needed specializations.
  template<size_t byte_size> struct PlatformAdd;

  // Support for platforms that implement some variants of add using a
  // (typically out of line) non-template helper function. The
  // generic arguments passed to PlatformAdd need to be translated to
  // the appropriate type for the helper function, the helper function
  // invoked on the translated arguments, and the result translated
  // back. Type is the parameter / return type of the helper
  // function. No scaling of add_value is performed when D is a pointer
  // type, so this function can be used to implement the support function
  // required by AddAndFetch.
  template<typename Type, typename Fn, typename D, typename I>
  static D add_using_helper(Fn fn, D volatile* dest, I add_value);

  // Dispatch handler for cmpxchg. Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformCmpxchg.
  template<typename D, typename U, typename T, typename Enable = void>
  struct CmpxchgImpl;

  // Platform-specific implementation of cmpxchg. Support for sizes
  // of 1, 4, and 8 is required. The class is a function object that
  // must be default constructable, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value and compare_value are of type T.
  // - order is of type atomic_memory_order.
  // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
  //
  // Then
  //   platform_cmpxchg(dest, compare_value, exchange_value, order)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T volatile*, T, T, atomic_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformCmpxchg;
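
  // Sketch (hypothetical, for exposition): a platform relying on GCC
  // builtins might implement the operator declared by the default
  // PlatformCmpxchg definition roughly as follows, mapping every
  // atomic_memory_order to the strongest ordering:
  //
  //   template<>
  //   template<typename T>
  //   inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
  //                                                   T compare_value,
  //                                                   T exchange_value,
  //                                                   atomic_memory_order order) const {
  //     STATIC_ASSERT(4 == sizeof(T));
  //     T expected = compare_value;
  //     __atomic_compare_exchange(dest, &expected, &exchange_value,
  //                               false /*weak*/,
  //                               __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  //     return expected; // prior value; equals compare_value on success
  //   }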

  // Support for platforms that implement some variants of cmpxchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformCmpxchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back. Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T cmpxchg_using_helper(Fn fn,
                                T volatile* dest,
                                T compare_value,
                                T exchange_value);

  // Support platforms that do not provide Read-Modify-Write
  // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
  // this class.
public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
  struct CmpxchgByteUsingInt;
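
  // Illustrative note: a platform without a byte-wide compare-and-swap
  // instruction opts in with a one-line specialization, e.g.
  //
  //   template<>
  //   struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
  //
  // after which 1-byte cmpxchg is emulated by a 4-byte cmpxchg on the
  // enclosing aligned word (see the definition later in this file).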
private:

  // Dispatch handler for xchg. Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformXchg.
  template<typename D, typename T, typename Enable = void>
  struct XchgImpl;

  // Platform-specific implementation of xchg. Support for sizes
  // of 4 and sizeof(intptr_t) bytes is required. The class is a function
  // object that must be default constructable, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value is of type T.
  // - platform_xchg is an object of type PlatformXchg<sizeof(T)>.
  //
  // Then
  //   platform_xchg(dest, exchange_value)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T volatile*, T, atomic_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformXchg;

  // Support for platforms that implement some variants of xchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformXchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back. Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T xchg_using_helper(Fn fn,
                             T volatile* dest,
                             T exchange_value);
};

template<typename From, typename To>
struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
  // Determine whether From* is implicitly convertible to To*, using
  // the "sizeof trick".
  typedef char yes;
  typedef char (&no)[2];

  static yes test(To*);
  static no test(...);
  static From* test_value;

  static const bool value = (sizeof(yes) == sizeof(test(test_value)));
};

// Handle load for pointer, integral and enum types.
template<typename T, typename PlatformOp>
struct Atomic::LoadImpl<
  T,
  PlatformOp,
  typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value || IsPointer<T>::value>::type>
{
  T operator()(T const volatile* dest) const {
    // Forward to the platform handler for the size of T.
    return PlatformOp()(dest);
  }
};

// Handle load for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T, typename PlatformOp>
struct Atomic::LoadImpl<
  T,
  PlatformOp,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T const volatile* dest) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    Decayed result = PlatformOp()(reinterpret_cast<Decayed const volatile*>(dest));
    return Translator::recover(result);
  }
};
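
// Sketch (hypothetical, for exposition): one plausible shape for a
// translator enabling the handler above, decaying a floating-point type to
// a same-sized integer. The double/int64_t pairing and the member bodies
// are assumptions here, not quoted from primitiveConversions.hpp; only the
// value/Value/Decayed/decay/recover contract is implied by this file.
//
//   template<>
//   struct PrimitiveConversions::Translate<double> {
//     static const bool value = true;
//     typedef double Value;
//     typedef int64_t Decayed;
//     static Decayed decay(Value x)   { return PrimitiveConversions::cast<Decayed>(x); }
//     static Value recover(Decayed x) { return PrimitiveConversions::cast<Value>(x); }
//   };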

// Default implementation of atomic load if a specific platform
// does not provide a specialization for a certain size class.
// For increased safety, the default implementation only allows
// load types that are pointer sized or smaller. If a platform still
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformLoad for that wider size class.
template<size_t byte_size>
struct Atomic::PlatformLoad {
  template<typename T>
  T operator()(T const volatile* dest) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    return *dest;
  }
};

// Handle store for integral and enum types.
//
// All the involved types must be identical.
template<typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  T, T,
  PlatformOp,
  typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value>::type>
{
  void operator()(T volatile* dest, T new_value) const {
    // Forward to the platform handler for the size of T.
    PlatformOp()(dest, new_value);
  }
};

// Handle store for pointer types.
//
// The new_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// new_value in the destination.
template<typename D, typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  D*, T*,
  PlatformOp,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
{
  void operator()(D* volatile* dest, T* new_value) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* value = new_value;
    PlatformOp()(dest, value);
  }
};

// Handle store for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments.
template<typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  T, T,
  PlatformOp,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  void operator()(T volatile* dest, T new_value) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    PlatformOp()(reinterpret_cast<Decayed volatile*>(dest),
                 Translator::decay(new_value));
  }
};
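
// Example (illustrative; Base, Derived and _head are hypothetical): the
// pointer handler above permits storing a more-derived pointer into a
// base-typed slot, mirroring the implicit conversion rules of a plain
// store:
//
//   class Base {};
//   class Derived : public Base {};
//
//   static Base* volatile _head;
//
//   void publish(Derived* d) {
//     Atomic::store(&_head, d); // T* = Derived*, D* = Base*
//   }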

// Default implementation of atomic store if a specific platform
// does not provide a specialization for a certain size class.
// For increased safety, the default implementation only allows
// storing types that are pointer sized or smaller. If a platform still
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformStore for that wider size class.
template<size_t byte_size>
struct Atomic::PlatformStore {
  template<typename T>
  void operator()(T volatile* dest,
                  T new_value) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    (void)const_cast<T&>(*dest = new_value);
  }
};

template<typename D>
inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  Atomic::add(dest, I(1), order);
}

template<typename D>
inline void Atomic::dec(D volatile* dest, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  // Assumes two's complement integer representation.
#pragma warning(suppress: 4146)
  Atomic::add(dest, I(-1), order);
}

template<typename D, typename I>
inline D Atomic::sub(D volatile* dest, I sub_value, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  STATIC_ASSERT(IsIntegral<I>::value);
  // If D is a pointer type, use [u]intptr_t as the addend type,
  // matching signedness of I. Otherwise, use D as the addend type.
  typedef typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type PI;
  typedef typename Conditional<IsPointer<D>::value, PI, D>::type AddendType;
  // Only allow conversions that can't change the value.
  STATIC_ASSERT(IsSigned<I>::value == IsSigned<AddendType>::value);
  STATIC_ASSERT(sizeof(I) <= sizeof(AddendType));
  AddendType addend = sub_value;
  // Assumes two's complement integer representation.
#pragma warning(suppress: 4146) // In case AddendType is not signed.
  return Atomic::add(dest, -addend, order);
}

// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of specializations
// of the operator template is provided, nor are there any generic
// specializations of the class. The platform file is responsible for
// providing those.
template<size_t byte_size>
struct Atomic::PlatformCmpxchg {
  template<typename T>
  T operator()(T volatile* dest,
               T compare_value,
               T exchange_value,
               atomic_memory_order order) const;
};

// Define the class before including platform file, which may use this
// as a base class, requiring it be complete. The definition is later
// in this file, near the other definitions related to cmpxchg.
struct Atomic::CmpxchgByteUsingInt {
  static uint8_t get_byte_in_int(uint32_t n, uint32_t idx);
  static uint32_t set_byte_in_int(uint32_t n, uint8_t b, uint32_t idx);
  template<typename T>
  T operator()(T volatile* dest,
               T compare_value,
               T exchange_value,
               atomic_memory_order order) const;
};
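
// Example (illustrative; _next and claim_two are hypothetical): per the
// inc()/dec() contract and the add() definitions above, pointer operands
// are scaled like ordinary pointer arithmetic, so an atomic bump of a
// cursor advances by whole elements, not bytes:
//
//   static int* volatile _next;
//
//   int* claim_two() {
//     // Returns the previous cursor; _next itself advances by
//     // 2 * sizeof(int) bytes, i.e. two elements.
//     return Atomic::fetch_and_add(&_next, 2);
//   }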

// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of specializations
// of the operator template is provided, nor are there any generic
// specializations of the class. The platform file is responsible for
// providing those.
template<size_t byte_size>
struct Atomic::PlatformXchg {
  template<typename T>
  T operator()(T volatile* dest,
               T exchange_value,
               atomic_memory_order order) const;
};

template <ScopedFenceType T>
class ScopedFenceGeneral: public StackObj {
 public:
  void prefix() {}
  void postfix() {}
};

// The following methods can be specialized using simple template specialization
// in the platform specific files for optimization purposes. Otherwise the
// generalized variant is used.

template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix()       { OrderAccess::acquire(); }
template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix()        { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix()  { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }

template <ScopedFenceType T>
class ScopedFence : public ScopedFenceGeneral<T> {
  void *const _field;
 public:
  ScopedFence(void *const field) : _field(field) { prefix(); }
  ~ScopedFence() { postfix(); }
  void prefix() { ScopedFenceGeneral<T>::prefix(); }
  void postfix() { ScopedFenceGeneral<T>::postfix(); }
};

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

// size_t casts...
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

template<typename T>
inline T Atomic::load(const volatile T* dest) {
  return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
}

template<size_t byte_size, ScopedFenceType type>
struct Atomic::PlatformOrderedLoad {
  template <typename T>
  T operator()(const volatile T* p) const {
    ScopedFence<type> f((void*)p);
    return Atomic::load(p);
  }
};

template <typename T>
inline T Atomic::load_acquire(const volatile T* p) {
  return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
}

template<typename D, typename T>
inline void Atomic::store(volatile D* dest, T store_value) {
  StoreImpl<D, T, PlatformStore<sizeof(D)> >()(dest, store_value);
}

template<size_t byte_size, ScopedFenceType type>
struct Atomic::PlatformOrderedStore {
  template <typename T>
  void operator()(volatile T* p, T v) const {
    ScopedFence<type> f((void*)p);
    Atomic::store(p, v);
  }
};

template <typename D, typename T>
inline void Atomic::release_store(volatile D* p, T v) {
  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(p, v);
}

template <typename D, typename T>
inline void Atomic::release_store_fence(volatile D* p, T v) {
  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v);
}

template<typename D, typename I>
inline D Atomic::add(D volatile* dest, I add_value,
                     atomic_memory_order order) {
  return AddImpl<D, I>::add_and_fetch(dest, add_value, order);
}

template<typename D, typename I>
inline D Atomic::fetch_and_add(D volatile* dest, I add_value,
                               atomic_memory_order order) {
  return AddImpl<D, I>::fetch_and_add(dest, add_value, order);
}
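
// Example (illustrative, inside some function): the two entry points
// perform the same atomic update and differ only in which value they
// report back:
//
//   static volatile int _x = 10;
//
//   int a = Atomic::add(&_x, 5);           // _x == 15, a == 15 (new value)
//   int b = Atomic::fetch_and_add(&_x, 5); // _x == 20, b == 15 (old value)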

template<typename D, typename I>
struct Atomic::AddImpl<
  D, I,
  typename EnableIf<IsIntegral<I>::value &&
                    IsIntegral<D>::value &&
                    (sizeof(I) <= sizeof(D)) &&
                    (IsSigned<I>::value == IsSigned<D>::value)>::type>
{
  static D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) {
    D addend = add_value;
    return PlatformAdd<sizeof(D)>().add_and_fetch(dest, addend, order);
  }
  static D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) {
    D addend = add_value;
    return PlatformAdd<sizeof(D)>().fetch_and_add(dest, addend, order);
  }
};

template<typename P, typename I>
struct Atomic::AddImpl<
  P*, I,
  typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
{
  STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
  STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
  typedef typename Conditional<IsSigned<I>::value,
                               intptr_t,
                               uintptr_t>::type CI;

  static CI scale_addend(CI add_value) {
    return add_value * sizeof(P);
  }

  static P* add_and_fetch(P* volatile* dest, I add_value, atomic_memory_order order) {
    CI addend = add_value;
    return PlatformAdd<sizeof(P*)>().add_and_fetch(dest, scale_addend(addend), order);
  }
  static P* fetch_and_add(P* volatile* dest, I add_value, atomic_memory_order order) {
    CI addend = add_value;
    return PlatformAdd<sizeof(P*)>().fetch_and_add(dest, scale_addend(addend), order);
  }
};

template<typename Type, typename Fn, typename D, typename I>
inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
  return PrimitiveConversions::cast<D>(
    fn(PrimitiveConversions::cast<Type>(add_value),
       reinterpret_cast<Type volatile*>(dest)));
}

template<typename D, typename U, typename T>
inline D Atomic::cmpxchg(D volatile* dest,
                         U compare_value,
                         T exchange_value,
                         atomic_memory_order order) {
  return CmpxchgImpl<D, U, T>()(dest, compare_value, exchange_value, order);
}

template<typename D, typename T>
inline bool Atomic::replace_if_null(D* volatile* dest, T* value,
                                    atomic_memory_order order) {
  // Presently using a trivial implementation in terms of cmpxchg.
  // Consider adding platform support, to permit the use of compiler
  // intrinsics like gcc's __sync_bool_compare_and_swap.
  D* expected_null = NULL;
  return expected_null == cmpxchg(dest, expected_null, value, order);
}

// Handle cmpxchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value>::type>
{
  T operator()(T volatile* dest, T compare_value, T exchange_value,
               atomic_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformCmpxchg<sizeof(T)>()(dest,
                                        compare_value,
                                        exchange_value,
                                        order);
  }
};
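
// Example (illustrative; _max_seen and note_value are hypothetical): the
// canonical retry loop over cmpxchg, here maintaining a shared maximum:
//
//   static volatile size_t _max_seen;
//
//   void note_value(size_t v) {
//     size_t cur = Atomic::load(&_max_seen);
//     while (v > cur) {
//       size_t prev = Atomic::cmpxchg(&_max_seen, cur, v);
//       if (prev == cur) break; // installed v
//       cur = prev;             // lost the race; retest against the winner
//     }
//   }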

// Handle cmpxchg for pointer types.
//
// The destination's type and the compare_value type must be the same,
// ignoring cv-qualifiers; we don't care about the cv-qualifiers of
// the compare_value.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename D, typename U, typename T>
struct Atomic::CmpxchgImpl<
  D*, U*, T*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
                    IsSame<typename RemoveCV<D>::type,
                           typename RemoveCV<U>::type>::value>::type>
{
  D* operator()(D* volatile* dest, U* compare_value, T* exchange_value,
                atomic_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    // Don't care what the CV qualifiers for compare_value are,
    // but we need to match D* when calling platform support.
    D* old_value = const_cast<D*>(compare_value);
    return PlatformCmpxchg<sizeof(D*)>()(dest, old_value, new_value, order);
  }
};

// Handle cmpxchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T volatile* dest, T compare_value, T exchange_value,
               atomic_memory_order order) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      cmpxchg(reinterpret_cast<Decayed volatile*>(dest),
              Translator::decay(compare_value),
              Translator::decay(exchange_value),
              order));
  }
};

template<typename Type, typename Fn, typename T>
inline T Atomic::cmpxchg_using_helper(Fn fn,
                                      T volatile* dest,
                                      T compare_value,
                                      T exchange_value) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest),
       PrimitiveConversions::cast<Type>(compare_value)));
}

inline uint32_t Atomic::CmpxchgByteUsingInt::set_byte_in_int(uint32_t n,
                                                             uint8_t b,
                                                             uint32_t idx) {
  int bitsIdx = BitsPerByte * idx;
  return (n & ~(static_cast<uint32_t>(0xff) << bitsIdx))
       | (static_cast<uint32_t>(b) << bitsIdx);
}

inline uint8_t Atomic::CmpxchgByteUsingInt::get_byte_in_int(uint32_t n,
                                                            uint32_t idx) {
  int bitsIdx = BitsPerByte * idx;
  return (uint8_t)(n >> bitsIdx);
}
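
// Worked example (illustrative): with idx == 1, bitsIdx == 8, so
//   set_byte_in_int(0x11223344, 0xAA, 1)
//     == (0x11223344 & ~0x0000FF00) | (0xAA << 8)
//     == 0x11220044 | 0x0000AA00
//     == 0x1122AA44
// and get_byte_in_int(0x1122AA44, 1) == 0xAA recovers it.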

template<typename T>
inline T Atomic::CmpxchgByteUsingInt::operator()(T volatile* dest,
                                                 T compare_value,
                                                 T exchange_value,
                                                 atomic_memory_order order) const {
  STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
  uint8_t canon_exchange_value = exchange_value;
  uint8_t canon_compare_value = compare_value;
  volatile uint32_t* aligned_dest
    = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
  size_t offset = pointer_delta(dest, aligned_dest, 1);

  uint32_t idx = (Endian::NATIVE == Endian::BIG)
                   ? (sizeof(uint32_t) - 1 - offset)
                   : offset;

  // current value may not be what we are looking for, so force it
  // to that value so the initial cmpxchg will fail if it is different
  uint32_t cur = set_byte_in_int(Atomic::load(aligned_dest), canon_compare_value, idx);

  // always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure
  do {
    // value to swap in matches current value
    // except for the one byte we want to update
    uint32_t new_value = set_byte_in_int(cur, canon_exchange_value, idx);

    uint32_t res = cmpxchg(aligned_dest, cur, new_value, order);
    if (res == cur) break;      // success

    // at least one byte in the int changed value, so update
    // our view of the current int
    cur = res;
    // if our byte is still as cur we loop and try again
  } while (get_byte_in_int(cur, idx) == canon_compare_value);

  return PrimitiveConversions::cast<T>(get_byte_in_int(cur, idx));
}

// Handle xchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value>::type>
{
  T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformXchg<sizeof(T)>()(dest, exchange_value, order);
  }
};

// Handle xchg for pointer types.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename D, typename T>
struct Atomic::XchgImpl<
  D*, T*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
{
  D* operator()(D* volatile* dest, T* exchange_value, atomic_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    return PlatformXchg<sizeof(D*)>()(dest, new_value, order);
  }
};

// Handle xchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      xchg(reinterpret_cast<Decayed volatile*>(dest),
           Translator::decay(exchange_value),
           order));
  }
};

template<typename Type, typename Fn, typename T>
inline T Atomic::xchg_using_helper(Fn fn,
                                   T volatile* dest,
                                   T exchange_value) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  // Notice the swapped order of arguments. Change when/if stubs are rewritten.
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest)));
}

template<typename D, typename T>
inline D Atomic::xchg(volatile D* dest, T exchange_value, atomic_memory_order order) {
  return XchgImpl<D, T>()(dest, exchange_value, order);
}

#endif // SHARE_RUNTIME_ATOMIC_HPP