/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_ATOMIC_HPP
#define SHARE_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "metaprogramming/removeCV.hpp"
#include "metaprogramming/removePointer.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/align.hpp"
#include "utilities/bytes.hpp"
#include "utilities/macros.hpp"
#include <type_traits>

enum atomic_memory_order {
  // The modes that align with C++11 are intended to
  // follow the same semantics.
  memory_order_relaxed = 0,
  memory_order_acquire = 2,
  memory_order_release = 3,
  memory_order_acq_rel = 4,
  // Strong two-way memory barrier.
  memory_order_conservative = 8
};
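
// Editor's note: an illustrative sketch (not part of the original header) of
// choosing an order argument for the Atomic operations declared below. The
// counter variables are hypothetical:
//
//   volatile int _samples = 0;
//   Atomic::add(&_samples, 1, memory_order_relaxed); // plain statistic; no
//                                                    // ordering required
//   volatile int _refcount = 1;
//   Atomic::add(&_refcount, 1);                      // default order is
//                                                    // memory_order_conservative,
//                                                    // a full two-way barrier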

enum ScopedFenceType {
    X_ACQUIRE
  , RELEASE_X
  , RELEASE_X_FENCE
};

class Atomic : AllStatic {
public:
  // Atomic operations on int64 types are not available on all 32-bit
  // platforms. If atomic ops on int64 are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.

  // Atomically store to a location
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename D, typename T>
  inline static void store(volatile D* dest, T store_value);

  template <typename D, typename T>
  inline static void release_store(volatile D* dest, T store_value);

  template <typename D, typename T>
  inline static void release_store_fence(volatile D* dest, T store_value);

  // Atomically load from a location
  // The type T must be either a pointer type, an integral/enum type,
  // or a type that is primitive convertible using PrimitiveConversions.
  template<typename T>
  inline static T load(const volatile T* dest);

  template <typename T>
  inline static T load_acquire(const volatile T* dest);
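
  // Editor's note: an illustrative publish/consume sketch (not part of the
  // original header). The Foo type and _foo field are hypothetical:
  //
  //   Foo* volatile _foo = NULL;
  //   Atomic::release_store(&_foo, new Foo());   // publisher: Foo is fully
  //                                              // constructed before the store
  //   Foo* f = Atomic::load_acquire(&_foo);      // consumer: if f != NULL, the
  //                                              // Foo's fields are visible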

  // Atomically add to a location. *add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>

  // Returns updated value.
  template<typename D, typename I>
  inline static D add(D volatile* dest, I add_value,
                      atomic_memory_order order = memory_order_conservative);

  // Returns previous value.
  template<typename D, typename I>
  inline static D fetch_and_add(D volatile* dest, I add_value,
                                atomic_memory_order order = memory_order_conservative);

  template<typename D, typename I>
  inline static D sub(D volatile* dest, I sub_value,
                      atomic_memory_order order = memory_order_conservative);
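
  // Editor's note: an illustrative sketch (not part of the original header).
  // The _next_id counter is hypothetical; add() returns the updated value,
  // fetch_and_add() returns the previous one:
  //
  //   volatile uint _next_id = 0;
  //   uint updated  = Atomic::add(&_next_id, 1u);           // returns 1
  //   uint previous = Atomic::fetch_and_add(&_next_id, 1u); // returns 1,
  //                                                         // _next_id is now 2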

  // Atomically increment location. inc() provides:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type. If it is a pointer type, then the increment is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void inc(D volatile* dest,
                         atomic_memory_order order = memory_order_conservative);

  // Atomically decrement a location. dec() provides:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type. If it is a pointer type, then the decrement is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void dec(D volatile* dest,
                         atomic_memory_order order = memory_order_conservative);

  // Performs atomic exchange of *dest with exchange_value. Returns the
  // prior value of *dest. xchg*() provide:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename D, typename T>
  inline static D xchg(volatile D* dest, T exchange_value,
                       atomic_memory_order order = memory_order_conservative);

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns the prior
  // value of *dest. cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>

  template<typename D, typename U, typename T>
  inline static D cmpxchg(D volatile* dest,
                          U compare_value,
                          T exchange_value,
                          atomic_memory_order order = memory_order_conservative);
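
  // Editor's note: an illustrative compare-and-exchange retry loop (not part
  // of the original header). The _max_seen variable and record() function are
  // hypothetical:
  //
  //   volatile int _max_seen = 0;
  //   void record(int sample) {
  //     int cur = Atomic::load(&_max_seen);
  //     while (sample > cur) {
  //       int witnessed = Atomic::cmpxchg(&_max_seen, cur, sample);
  //       if (witnessed == cur) break;  // our value was installed
  //       cur = witnessed;              // lost the race; re-check and retry
  //     }
  //   }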

  // Performs atomic compare of *dest and NULL, and replaces *dest
  // with exchange_value if the comparison succeeded. Returns true if
  // the comparison succeeded and the exchange occurred. This is
  // often used as part of lazy initialization, as a lock-free
  // alternative to the Double-Checked Locking Pattern.
  template<typename D, typename T>
  inline static bool replace_if_null(D* volatile* dest, T* value,
                                     atomic_memory_order order = memory_order_conservative);
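
  // Editor's note: an illustrative lazy-initialization sketch (not part of
  // the original header). The Foo type and _cache field are hypothetical:
  //
  //   Foo* volatile _cache = NULL;
  //   Foo* get_cache() {
  //     Foo* c = Atomic::load_acquire(&_cache);
  //     if (c == NULL) {
  //       Foo* fresh = new Foo();
  //       if (!Atomic::replace_if_null(&_cache, fresh)) {
  //         delete fresh;               // another thread won the race
  //       }
  //       c = Atomic::load_acquire(&_cache);
  //     }
  //     return c;
  //   }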

private:
  WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointerConvertible is declared private
  // Test whether From is implicitly convertible to To.
  // From and To must be pointer types.
  // Note: Provides the limited subset of C++11 std::is_convertible
  // that is needed here.
  template<typename From, typename To> struct IsPointerConvertible;

protected:
  // Dispatch handler for store. Provides type-based validity
  // checking and limited conversions around calls to the platform-
  // specific implementation layer provided by PlatformOp.
  template<typename D, typename T, typename PlatformOp, typename Enable = void>
  struct StoreImpl;

  // Platform-specific implementation of store. Support for sizes
  // of 1, 2, 4, and (if different) pointer size bytes are required.
  // The class is a function object that must be default constructable,
  // with these requirements:
  //
  // either:
  // - dest is of type D*, an integral, enum or pointer type.
  // - new_value is of type T, an integral, enum or pointer type D or
  //   pointer type convertible to D.
  // or:
  // - T and D are the same and are primitive convertible using PrimitiveConversions
  // and either way:
  // - platform_store is an object of type PlatformStore<sizeof(T)>.
  //
  // Then
  //   platform_store(new_value, dest)
  // must be a valid expression.
  //
  // The default implementation is a volatile store. If a platform
  // requires more for e.g. 64 bit stores, a specialization is required
  template<size_t byte_size> struct PlatformStore;

  // Dispatch handler for load. Provides type-based validity
  // checking and limited conversions around calls to the platform-
  // specific implementation layer provided by PlatformOp.
  template<typename T, typename PlatformOp, typename Enable = void>
  struct LoadImpl;

  // Platform-specific implementation of load. Support for sizes of
  // 1, 2, 4 bytes and (if different) pointer size bytes are required.
  // The class is a function object that must be default
  // constructable, with these requirements:
  //
  // - dest is of type T*, an integral, enum or pointer type, or
  //   T is convertible to a primitive type using PrimitiveConversions
  // - platform_load is an object of type PlatformLoad<sizeof(T)>.
  //
  // Then
  //   platform_load(src)
  // must be a valid expression, returning a result convertible to T.
  //
  // The default implementation is a volatile load. If a platform
  // requires more for e.g. 64 bit loads, a specialization is required
  template<size_t byte_size> struct PlatformLoad;

  // Give platforms a variation point to specialize.
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;

private:
  // Dispatch handler for add. Provides type-based validity checking
  // and limited conversions around calls to the platform-specific
  // implementation layer provided by PlatformAdd.
  template<typename D, typename I, typename Enable = void>
  struct AddImpl;

  // Platform-specific implementation of add. Support for sizes of 4
  // bytes and (if different) pointer size bytes are required. The
  // class must be default constructable, with these requirements:
  //
  // - dest is of type D*, an integral or pointer type.
  // - add_value is of type I, an integral type.
  // - sizeof(I) == sizeof(D).
  // - if D is an integral type, I == D.
  // - order is of type atomic_memory_order.
  // - platform_add is an object of type PlatformAdd<sizeof(D)>.
  //
  // Then both
  //   platform_add.add_and_fetch(dest, add_value, order)
  //   platform_add.fetch_and_add(dest, add_value, order)
  // must be valid expressions returning a result convertible to D.
  //
  // add_and_fetch atomically adds add_value to the value of dest,
  // returning the new value.
  //
  // fetch_and_add atomically adds add_value to the value of dest,
  // returning the old value.
  //
  // When D is a pointer type P*, both add_and_fetch and fetch_and_add
  // treat it as if it were an uintptr_t; they do not perform any
  // scaling of add_value, as that has already been done by the caller.
  //
  // No definition is provided; all platforms must explicitly define
  // this class and any needed specializations.
  template<size_t byte_size> struct PlatformAdd;
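
  // Editor's note: one possible shape of a platform definition, shown only to
  // illustrate the contract documented above (not part of this shared header;
  // real definitions live in the platform files pulled in via
  // OS_CPU_HEADER(atomic) below, and may differ):
  //
  //   template<size_t byte_size>
  //   struct Atomic::PlatformAdd {
  //     template<typename D, typename I>
  //     D add_and_fetch(D volatile* dest, I add_value,
  //                     atomic_memory_order order) const;
  //     template<typename D, typename I>
  //     D fetch_and_add(D volatile* dest, I add_value,
  //                     atomic_memory_order order) const;
  //   };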

  // Support for platforms that implement some variants of add using a
  // (typically out of line) non-template helper function. The
  // generic arguments passed to PlatformAdd need to be translated to
  // the appropriate type for the helper function, the helper function
  // invoked on the translated arguments, and the result translated
  // back. Type is the parameter / return type of the helper
  // function. No scaling of add_value is performed when D is a pointer
  // type, so this function can be used to implement the support function
  // required by AddAndFetch.
  template<typename Type, typename Fn, typename D, typename I>
  static D add_using_helper(Fn fn, D volatile* dest, I add_value);

  // Dispatch handler for cmpxchg. Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformCmpxchg.
  template<typename D, typename U, typename T, typename Enable = void>
  struct CmpxchgImpl;

  // Platform-specific implementation of cmpxchg. Support for sizes
  // of 1, 4, and 8 are required. The class is a function object that
  // must be default constructable, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value and compare_value are of type T.
  // - order is of type atomic_memory_order.
  // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
  //
  // Then
  //   platform_cmpxchg(dest, compare_value, exchange_value, order)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T volatile*, T, T, atomic_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformCmpxchg;

  // Support for platforms that implement some variants of cmpxchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformCmpxchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back. Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T cmpxchg_using_helper(Fn fn,
                                T volatile* dest,
                                T compare_value,
                                T exchange_value);

  // Support platforms that do not provide Read-Modify-Write
  // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
  // this class.
public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
  struct CmpxchgByteUsingInt;
private:

  // Dispatch handler for xchg. Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformXchg.
  template<typename D, typename T, typename Enable = void>
  struct XchgImpl;

  // Platform-specific implementation of xchg. Support for sizes
  // of 4, and sizeof(intptr_t) are required. The class is a function
  // object that must be default constructable, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value is of type T.
  // - platform_xchg is an object of type PlatformXchg<sizeof(T)>.
  //
  // Then
  //   platform_xchg(dest, exchange_value)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T volatile*, T, atomic_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformXchg;

  // Support for platforms that implement some variants of xchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformXchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back. Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T xchg_using_helper(Fn fn,
                             T volatile* dest,
                             T exchange_value);
};

template<typename From, typename To>
struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
  // Determine whether From* is implicitly convertible to To*, using
  // the "sizeof trick".
  typedef char yes;
  typedef char (&no)[2];

  static yes test(To*);
  static no test(...);
  static From* test_value;

  static const bool value = (sizeof(yes) == sizeof(test(test_value)));
};

// Handle load for pointer, integral and enum types.
template<typename T, typename PlatformOp>
struct Atomic::LoadImpl<
  T,
  PlatformOp,
  typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value || IsPointer<T>::value>::type>
{
  T operator()(T const volatile* dest) const {
    // Forward to the platform handler for the size of T.
    return PlatformOp()(dest);
  }
};

// Handle load for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T, typename PlatformOp>
struct Atomic::LoadImpl<
  T,
  PlatformOp,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T const volatile* dest) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    Decayed result = PlatformOp()(reinterpret_cast<Decayed const volatile*>(dest));
    return Translator::recover(result);
  }
};

// Default implementation of atomic load if a specific platform
// does not provide a specialization for a certain size class.
// For increased safety, the default implementation only allows
// load types that are pointer sized or smaller. If a platform still
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformLoad for that wider size class.
template<size_t byte_size>
struct Atomic::PlatformLoad {
  template<typename T>
  T operator()(T const volatile* dest) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    return *dest;
  }
};

// Handle store for integral and enum types.
//
// All the involved types must be identical.
template<typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  T, T,
  PlatformOp,
  typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value>::type>
{
  void operator()(T volatile* dest, T new_value) const {
    // Forward to the platform handler for the size of T.
    PlatformOp()(dest, new_value);
  }
};

// Handle store for pointer types.
//
// The new_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// new_value in the destination.
template<typename D, typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  D*, T*,
  PlatformOp,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
{
  void operator()(D* volatile* dest, T* new_value) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* value = new_value;
    PlatformOp()(dest, value);
  }
};

// Handle store for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments.
template<typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  T, T,
  PlatformOp,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  void operator()(T volatile* dest, T new_value) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    PlatformOp()(reinterpret_cast<Decayed volatile*>(dest),
                 Translator::decay(new_value));
  }
};

// Default implementation of atomic store if a specific platform
// does not provide a specialization for a certain size class.
// For increased safety, the default implementation only allows
// storing types that are pointer sized or smaller. If a platform still
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformStore for that wider size class.
template<size_t byte_size>
struct Atomic::PlatformStore {
  template<typename T>
  void operator()(T volatile* dest,
                  T new_value) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    (void)const_cast<T&>(*dest = new_value);
  }
};

template<typename D>
inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  Atomic::add(dest, I(1), order);
}

template<typename D>
inline void Atomic::dec(D volatile* dest, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  // Assumes two's complement integer representation.
  #pragma warning(suppress: 4146)
  Atomic::add(dest, I(-1), order);
}

template<typename D, typename I>
inline D Atomic::sub(D volatile* dest, I sub_value, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  STATIC_ASSERT(IsIntegral<I>::value);
  // If D is a pointer type, use [u]intptr_t as the addend type,
  // matching signedness of I. Otherwise, use D as the addend type.
  typedef typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type PI;
  typedef typename Conditional<IsPointer<D>::value, PI, D>::type AddendType;
  // Only allow conversions that can't change the value.
  STATIC_ASSERT(IsSigned<I>::value == IsSigned<AddendType>::value);
  STATIC_ASSERT(sizeof(I) <= sizeof(AddendType));
  AddendType addend = sub_value;
  // Assumes two's complement integer representation.
  #pragma warning(suppress: 4146) // In case AddendType is not signed.
  return Atomic::add(dest, -addend, order);
}

// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of specializations
// of the operator template are provided, nor are there any generic
// specializations of the class. The platform file is responsible for
// providing those.
template<size_t byte_size>
struct Atomic::PlatformCmpxchg {
  template<typename T>
  T operator()(T volatile* dest,
               T compare_value,
               T exchange_value,
               atomic_memory_order order) const;
};

// Define the class before including platform file, which may use this
// as a base class, requiring it be complete. The definition is later
// in this file, near the other definitions related to cmpxchg.
struct Atomic::CmpxchgByteUsingInt {
  static uint8_t get_byte_in_int(uint32_t n, uint32_t idx);
  static uint32_t set_byte_in_int(uint32_t n, uint8_t b, uint32_t idx);
  template<typename T>
  T operator()(T volatile* dest,
               T compare_value,
               T exchange_value,
               atomic_memory_order order) const;
};

// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of specializations
// of the operator template are provided, nor are there any generic
// specializations of the class. The platform file is responsible for
// providing those.
template<size_t byte_size>
struct Atomic::PlatformXchg {
  template<typename T>
  T operator()(T volatile* dest,
               T exchange_value,
               atomic_memory_order order) const;
};

template <ScopedFenceType T>
class ScopedFenceGeneral: public StackObj {
public:
  void prefix() {}
  void postfix() {}
};

// The following methods can be specialized using simple template specialization
// in the platform specific files for optimization purposes. Otherwise the
// generalized variant is used.

template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix()       { OrderAccess::acquire(); }
template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix()        { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix()  { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence();   }

template <ScopedFenceType T>
class ScopedFence : public ScopedFenceGeneral<T> {
  void *const _field;
public:
  ScopedFence(void *const field) : _field(field) { prefix(); }
  ~ScopedFence() { postfix(); }
  void prefix() { ScopedFenceGeneral<T>::prefix(); }
  void postfix() { ScopedFenceGeneral<T>::postfix(); }
};

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

// size_t casts...
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

template<typename T>
inline T Atomic::load(const volatile T* dest) {
  return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
}

template<size_t byte_size, ScopedFenceType type>
struct Atomic::PlatformOrderedLoad {
  template <typename T>
  T operator()(const volatile T* p) const {
    ScopedFence<type> f((void*)p);
    return Atomic::load(p);
  }
};

template <typename T>
inline T Atomic::load_acquire(const volatile T* p) {
  return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
}

template<typename D, typename T>
inline void Atomic::store(volatile D* dest, T store_value) {
  StoreImpl<D, T, PlatformStore<sizeof(D)> >()(dest, store_value);
}

template<size_t byte_size, ScopedFenceType type>
struct Atomic::PlatformOrderedStore {
  template <typename T>
  void operator()(volatile T* p, T v) const {
    ScopedFence<type> f((void*)p);
    Atomic::store(p, v);
  }
};

template <typename D, typename T>
inline void Atomic::release_store(volatile D* p, T v) {
  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(p, v);
}

template <typename D, typename T>
inline void Atomic::release_store_fence(volatile D* p, T v) {
  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v);
}

template<typename D, typename I>
inline D Atomic::add(D volatile* dest, I add_value,
                     atomic_memory_order order) {
  return AddImpl<D, I>::add_and_fetch(dest, add_value, order);
}

template<typename D, typename I>
inline D Atomic::fetch_and_add(D volatile* dest, I add_value,
                               atomic_memory_order order) {
  return AddImpl<D, I>::fetch_and_add(dest, add_value, order);
}

template<typename D, typename I>
struct Atomic::AddImpl<
  D, I,
  typename EnableIf<IsIntegral<I>::value &&
                    IsIntegral<D>::value &&
                    (sizeof(I) <= sizeof(D)) &&
                    (IsSigned<I>::value == IsSigned<D>::value)>::type>
{
  static D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) {
    D addend = add_value;
    return PlatformAdd<sizeof(D)>().add_and_fetch(dest, addend, order);
  }
  static D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) {
    D addend = add_value;
    return PlatformAdd<sizeof(D)>().fetch_and_add(dest, addend, order);
  }
};

template<typename P, typename I>
struct Atomic::AddImpl<
  P*, I,
  typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
{
  STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
  STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
  typedef typename Conditional<IsSigned<I>::value,
                               intptr_t,
                               uintptr_t>::type CI;

  static CI scale_addend(CI add_value) {
    return add_value * sizeof(P);
  }

  static P* add_and_fetch(P* volatile* dest, I add_value, atomic_memory_order order) {
    CI addend = add_value;
    return PlatformAdd<sizeof(P*)>().add_and_fetch(dest, scale_addend(addend), order);
  }
  static P* fetch_and_add(P* volatile* dest, I add_value, atomic_memory_order order) {
    CI addend = add_value;
    return PlatformAdd<sizeof(P*)>().fetch_and_add(dest, scale_addend(addend), order);
  }
};

template<typename Type, typename Fn, typename D, typename I>
inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
  return PrimitiveConversions::cast<D>(
    fn(PrimitiveConversions::cast<Type>(add_value),
       reinterpret_cast<Type volatile*>(dest)));
}

template<typename D, typename U, typename T>
inline D Atomic::cmpxchg(D volatile* dest,
                         U compare_value,
                         T exchange_value,
                         atomic_memory_order order) {
  return CmpxchgImpl<D, U, T>()(dest, compare_value, exchange_value, order);
}

template<typename D, typename T>
inline bool Atomic::replace_if_null(D* volatile* dest, T* value,
                                    atomic_memory_order order) {
  // Presently using a trivial implementation in terms of cmpxchg.
  // Consider adding platform support, to permit the use of compiler
  // intrinsics like gcc's __sync_bool_compare_and_swap.
  D* expected_null = NULL;
  return expected_null == cmpxchg(dest, expected_null, value, order);
}

// Handle cmpxchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value>::type>
{
  T operator()(T volatile* dest, T compare_value, T exchange_value,
               atomic_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformCmpxchg<sizeof(T)>()(dest,
                                        compare_value,
                                        exchange_value,
                                        order);
  }
};

// Handle cmpxchg for pointer types.
//
// The destination's type and the compare_value type must be the same,
// ignoring cv-qualifiers; we don't care about the cv-qualifiers of
// the compare_value.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename D, typename U, typename T>
struct Atomic::CmpxchgImpl<
  D*, U*, T*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
                    IsSame<typename RemoveCV<D>::type,
                           typename RemoveCV<U>::type>::value>::type>
{
  D* operator()(D* volatile* dest, U* compare_value, T* exchange_value,
                atomic_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    // Don't care what the CV qualifiers for compare_value are,
    // but we need to match D* when calling platform support.
    D* old_value = const_cast<D*>(compare_value);
    return PlatformCmpxchg<sizeof(D*)>()(dest, old_value, new_value, order);
  }
};

// Handle cmpxchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T volatile* dest, T compare_value, T exchange_value,
               atomic_memory_order order) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      cmpxchg(reinterpret_cast<Decayed volatile*>(dest),
              Translator::decay(compare_value),
              Translator::decay(exchange_value),
              order));
  }
};

template<typename Type, typename Fn, typename T>
inline T Atomic::cmpxchg_using_helper(Fn fn,
                                      T volatile* dest,
                                      T compare_value,
                                      T exchange_value) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest),
       PrimitiveConversions::cast<Type>(compare_value)));
}

inline uint32_t Atomic::CmpxchgByteUsingInt::set_byte_in_int(uint32_t n,
                                                             uint8_t b,
                                                             uint32_t idx) {
  int bitsIdx = BitsPerByte * idx;
  return (n & ~(static_cast<uint32_t>(0xff) << bitsIdx))
       | (static_cast<uint32_t>(b) << bitsIdx);
}

inline uint8_t Atomic::CmpxchgByteUsingInt::get_byte_in_int(uint32_t n,
                                                            uint32_t idx) {
  int bitsIdx = BitsPerByte * idx;
  return (uint8_t)(n >> bitsIdx);
}
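
// Editor's note: a worked example (not part of the original header). With
// n == 0xAABBCCDD and idx == 1, bitsIdx is 8, so get_byte_in_int(n, 1)
// returns 0xCC, and set_byte_in_int(n, 0x11, 1) yields 0xAABB11DD.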

template<typename T>
inline T Atomic::CmpxchgByteUsingInt::operator()(T volatile* dest,
                                                 T compare_value,
                                                 T exchange_value,
                                                 atomic_memory_order order) const {
  STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
  uint8_t canon_exchange_value = exchange_value;
  uint8_t canon_compare_value = compare_value;
  volatile uint32_t* aligned_dest
    = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
  size_t offset = pointer_delta(dest, aligned_dest, 1);

  uint32_t idx = (Endian::NATIVE == Endian::BIG)
                   ? (sizeof(uint32_t) - 1 - offset)
                   : offset;

  // current value may not be what we are looking for, so force it
  // to that value so the initial cmpxchg will fail if it is different
  uint32_t cur = set_byte_in_int(Atomic::load(aligned_dest), canon_compare_value, idx);

  // always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure
  do {
    // value to swap in matches current value
    // except for the one byte we want to update
    uint32_t new_value = set_byte_in_int(cur, canon_exchange_value, idx);

    uint32_t res = cmpxchg(aligned_dest, cur, new_value, order);
    if (res == cur) break;      // success

    // at least one byte in the int changed value, so update
    // our view of the current int
    cur = res;
    // if our byte is still as cur we loop and try again
  } while (get_byte_in_int(cur, idx) == canon_compare_value);

  return PrimitiveConversions::cast<T>(get_byte_in_int(cur, idx));
}

// Handle xchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value>::type>
{
  T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformXchg<sizeof(T)>()(dest, exchange_value, order);
  }
};

// Handle xchg for pointer types.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename D, typename T>
struct Atomic::XchgImpl<
  D*, T*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
{
  D* operator()(D* volatile* dest, T* exchange_value, atomic_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    return PlatformXchg<sizeof(D*)>()(dest, new_value, order);
  }
};

// Handle xchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      xchg(reinterpret_cast<Decayed volatile*>(dest),
           Translator::decay(exchange_value),
           order));
  }
};

template<typename Type, typename Fn, typename T>
inline T Atomic::xchg_using_helper(Fn fn,
                                   T volatile* dest,
                                   T exchange_value) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  // Notice the swapped order of arguments. Change when/if stubs are rewritten.
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest)));
}

template<typename D, typename T>
inline D Atomic::xchg(volatile D* dest, T exchange_value, atomic_memory_order order) {
  return XchgImpl<D, T>()(dest, exchange_value, order);
}

#endif // SHARE_RUNTIME_ATOMIC_HPP