GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/share/runtime/atomic.hpp
/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_ATOMIC_HPP
#define SHARE_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "metaprogramming/removeCV.hpp"
#include "metaprogramming/removePointer.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/align.hpp"
#include "utilities/bytes.hpp"
#include "utilities/macros.hpp"
#include <type_traits>

enum atomic_memory_order {
  // The modes that align with C++11 are intended to
  // follow the same semantics.
  memory_order_relaxed = 0,
  memory_order_acquire = 2,
  memory_order_release = 3,
  memory_order_acq_rel = 4,
  memory_order_seq_cst = 5,
  // Strong two-way memory barrier.
  memory_order_conservative = 8
};

enum ScopedFenceType {
    X_ACQUIRE
  , RELEASE_X
  , RELEASE_X_FENCE
};

class Atomic : AllStatic {
public:
  // Atomic operations on int64 types are not available on all 32-bit
  // platforms. If atomic ops on int64 are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.

  // Atomically store to a location
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename D, typename T>
  inline static void store(volatile D* dest, T store_value);

  template <typename D, typename T>
  inline static void release_store(volatile D* dest, T store_value);

  template <typename D, typename T>
  inline static void release_store_fence(volatile D* dest, T store_value);

  // Atomically load from a location
  // The type T must be either a pointer type, an integral/enum type,
  // or a type that is primitive convertible using PrimitiveConversions.
  template<typename T>
  inline static T load(const volatile T* dest);

  template <typename T>
  inline static T load_acquire(const volatile T* dest);

  // Atomically add to a location. *add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>

  // Returns updated value.
  template<typename D, typename I>
  inline static D add(D volatile* dest, I add_value,
                      atomic_memory_order order = memory_order_conservative);

  // Returns previous value.
  template<typename D, typename I>
  inline static D fetch_and_add(D volatile* dest, I add_value,
                                atomic_memory_order order = memory_order_conservative);

  template<typename D, typename I>
  inline static D sub(D volatile* dest, I sub_value,
                      atomic_memory_order order = memory_order_conservative);

  // Atomically increment a location. inc() provides:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type. If it is a pointer type, then the increment is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void inc(D volatile* dest,
                         atomic_memory_order order = memory_order_conservative);

  // Atomically decrement a location. dec() provides:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type. If it is a pointer type, then the decrement is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void dec(D volatile* dest,
                         atomic_memory_order order = memory_order_conservative);
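
  // Illustrative usage (editorial sketch, not part of the original header;
  // the counter name below is hypothetical):
  //
  //   static volatile size_t _request_count = 0;
  //   Atomic::inc(&_request_count);                                  // conservative (full) barrier
  //   Atomic::add(&_request_count, size_t(8), memory_order_relaxed); // no ordering implied
  //   size_t snapshot = Atomic::load(&_request_count);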

  // Performs atomic exchange of *dest with exchange_value. Returns the
  // prior value of *dest. xchg*() provide:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename D, typename T>
  inline static D xchg(volatile D* dest, T exchange_value,
                       atomic_memory_order order = memory_order_conservative);

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns prior
  // value of *dest. cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>

  template<typename D, typename U, typename T>
  inline static D cmpxchg(D volatile* dest,
                          U compare_value,
                          T exchange_value,
                          atomic_memory_order order = memory_order_conservative);
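
  // Illustrative usage (editorial sketch, not part of the original header;
  // _flags is hypothetical): a classic compare-and-swap retry loop that
  // atomically sets the low bit of a flags word.
  //
  //   static volatile uintptr_t _flags = 0;
  //   uintptr_t old_value = Atomic::load(&_flags);
  //   while (true) {
  //     uintptr_t new_value = old_value | 1;
  //     uintptr_t prior = Atomic::cmpxchg(&_flags, old_value, new_value);
  //     if (prior == old_value) break; // exchange happened
  //     old_value = prior;             // lost the race; retry with the fresh value
  //   }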

  // Performs atomic compare of *dest and NULL, and replaces *dest
  // with exchange_value if the comparison succeeded. Returns true if
  // the comparison succeeded and the exchange occurred. This is
  // often used as part of lazy initialization, as a lock-free
  // alternative to the Double-Checked Locking Pattern.
  template<typename D, typename T>
  inline static bool replace_if_null(D* volatile* dest, T* value,
                                     atomic_memory_order order = memory_order_conservative);
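
  // Illustrative usage (editorial sketch, not part of the original header;
  // FooTable and _table are hypothetical):
  //
  //   static FooTable* volatile _table = NULL;
  //   FooTable* t = new FooTable();
  //   if (!Atomic::replace_if_null(&_table, t)) {
  //     delete t; // another thread won the race and installed its table
  //   }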

private:
WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointerConvertible is declared private
  // Test whether From is implicitly convertible to To.
  // From and To must be pointer types.
  // Note: Provides the limited subset of C++11 std::is_convertible
  // that is needed here.
  template<typename From, typename To> struct IsPointerConvertible;

protected:
  // Dispatch handler for store. Provides type-based validity
  // checking and limited conversions around calls to the platform-
  // specific implementation layer provided by PlatformOp.
  template<typename D, typename T, typename PlatformOp, typename Enable = void>
  struct StoreImpl;

  // Platform-specific implementation of store. Support for sizes
  // of 1, 2, 4, and (if different) pointer size bytes are required.
  // The class is a function object that must be default constructible,
  // with these requirements:
  //
  // either:
  // - dest is of type D*, an integral, enum or pointer type.
  // - new_value is of type T, an integral, enum or pointer type D or
  //   pointer type convertible to D.
  // or:
  // - T and D are the same and are primitive convertible using PrimitiveConversions
  // and either way:
  // - platform_store is an object of type PlatformStore<sizeof(T)>.
  //
  // Then
  //   platform_store(new_value, dest)
  // must be a valid expression.
  //
  // The default implementation is a volatile store. If a platform
  // requires more for e.g. 64 bit stores, a specialization is required.
  template<size_t byte_size> struct PlatformStore;

  // Dispatch handler for load. Provides type-based validity
  // checking and limited conversions around calls to the platform-
  // specific implementation layer provided by PlatformOp.
  template<typename T, typename PlatformOp, typename Enable = void>
  struct LoadImpl;

  // Platform-specific implementation of load. Support for sizes of
  // 1, 2, 4 bytes and (if different) pointer size bytes are required.
  // The class is a function object that must be default
  // constructible, with these requirements:
  //
  // - dest is of type T*, an integral, enum or pointer type, or
  //   T is convertible to a primitive type using PrimitiveConversions
  // - platform_load is an object of type PlatformLoad<sizeof(T)>.
  //
  // Then
  //   platform_load(src)
  // must be a valid expression, returning a result convertible to T.
  //
  // The default implementation is a volatile load. If a platform
  // requires more for e.g. 64 bit loads, a specialization is required.
  template<size_t byte_size> struct PlatformLoad;

  // Give platforms a variation point to specialize.
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;

private:
  // Dispatch handler for add. Provides type-based validity checking
  // and limited conversions around calls to the platform-specific
  // implementation layer provided by PlatformAdd.
  template<typename D, typename I, typename Enable = void>
  struct AddImpl;

  // Platform-specific implementation of add. Support for sizes of 4
  // bytes and (if different) pointer size bytes are required. The
  // class must be default constructible, with these requirements:
  //
  // - dest is of type D*, an integral or pointer type.
  // - add_value is of type I, an integral type.
  // - sizeof(I) == sizeof(D).
  // - if D is an integral type, I == D.
  // - order is of type atomic_memory_order.
  // - platform_add is an object of type PlatformAdd<sizeof(D)>.
  //
  // Then both
  //   platform_add.add_and_fetch(dest, add_value, order)
  //   platform_add.fetch_and_add(dest, add_value, order)
  // must be valid expressions returning a result convertible to D.
  //
  // add_and_fetch atomically adds add_value to the value of dest,
  // returning the new value.
  //
  // fetch_and_add atomically adds add_value to the value of dest,
  // returning the old value.
  //
  // When D is a pointer type P*, both add_and_fetch and fetch_and_add
  // treat it as if it were an uintptr_t; they do not perform any
  // scaling of add_value, as that has already been done by the caller.
  //
  // No definition is provided; all platforms must explicitly define
  // this class and any needed specializations.
  template<size_t byte_size> struct PlatformAdd;

  // Support for platforms that implement some variants of add using a
  // (typically out of line) non-template helper function. The
  // generic arguments passed to PlatformAdd need to be translated to
  // the appropriate type for the helper function, the helper function
  // invoked on the translated arguments, and the result translated
  // back. Type is the parameter / return type of the helper
  // function. No scaling of add_value is performed when D is a pointer
  // type, so this function can be used to implement the support functions
  // required by PlatformAdd.
  template<typename Type, typename Fn, typename D, typename I>
  static D add_using_helper(Fn fn, D volatile* dest, I add_value);

  // Dispatch handler for cmpxchg. Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformCmpxchg.
  template<typename D, typename U, typename T, typename Enable = void>
  struct CmpxchgImpl;

  // Platform-specific implementation of cmpxchg. Support for sizes
  // of 1, 4, and 8 is required. The class is a function object that
  // must be default constructible, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value and compare_value are of type T.
  // - order is of type atomic_memory_order.
  // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
  //
  // Then
  //   platform_cmpxchg(dest, compare_value, exchange_value, order)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T volatile*, T, T, atomic_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformCmpxchg;

  // Support for platforms that implement some variants of cmpxchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformCmpxchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back. Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T cmpxchg_using_helper(Fn fn,
                                T volatile* dest,
                                T compare_value,
                                T exchange_value);

  // Support platforms that do not provide Read-Modify-Write
  // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
  // this class.
public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
  struct CmpxchgByteUsingInt;
private:

  // Dispatch handler for xchg. Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformXchg.
  template<typename D, typename T, typename Enable = void>
  struct XchgImpl;

  // Platform-specific implementation of xchg. Support for sizes
  // of 4 and sizeof(intptr_t) is required. The class is a function
  // object that must be default constructible, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value is of type T.
  // - platform_xchg is an object of type PlatformXchg<sizeof(T)>.
  //
  // Then
  //   platform_xchg(dest, exchange_value)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T volatile*, T, atomic_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformXchg;

  // Support for platforms that implement some variants of xchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformXchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back. Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T xchg_using_helper(Fn fn,
                             T volatile* dest,
                             T exchange_value);
};

template<typename From, typename To>
struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
  // Determine whether From* is implicitly convertible to To*, using
  // the "sizeof trick".
  typedef char yes;
  typedef char (&no)[2];

  static yes test(To*);
  static no test(...);
  static From* test_value;

  static const bool value = (sizeof(yes) == sizeof(test(test_value)));
};

// Handle load for pointer, integral and enum types.
template<typename T, typename PlatformOp>
struct Atomic::LoadImpl<
  T,
  PlatformOp,
  typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value || IsPointer<T>::value>::type>
{
  T operator()(T const volatile* dest) const {
    // Forward to the platform handler for the size of T.
    return PlatformOp()(dest);
  }
};

// Handle load for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T, typename PlatformOp>
struct Atomic::LoadImpl<
  T,
  PlatformOp,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T const volatile* dest) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    Decayed result = PlatformOp()(reinterpret_cast<Decayed const volatile*>(dest));
    return Translator::recover(result);
  }
};

// Default implementation of atomic load if a specific platform
// does not provide a specialization for a certain size class.
// For increased safety, the default implementation only allows
// load types that are pointer sized or smaller. If a platform still
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformLoad for that wider size class.
template<size_t byte_size>
struct Atomic::PlatformLoad {
  template<typename T>
  T operator()(T const volatile* dest) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    return *dest;
  }
};

// Handle store for integral and enum types.
//
// All the involved types must be identical.
template<typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  T, T,
  PlatformOp,
  typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value>::type>
{
  void operator()(T volatile* dest, T new_value) const {
    // Forward to the platform handler for the size of T.
    PlatformOp()(dest, new_value);
  }
};

// Handle store for pointer types.
//
// The new_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// new_value in the destination.
template<typename D, typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  D*, T*,
  PlatformOp,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
{
  void operator()(D* volatile* dest, T* new_value) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* value = new_value;
    PlatformOp()(dest, value);
  }
};

// Handle store for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments.
template<typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  T, T,
  PlatformOp,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  void operator()(T volatile* dest, T new_value) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    PlatformOp()(reinterpret_cast<Decayed volatile*>(dest),
                 Translator::decay(new_value));
  }
};

// Default implementation of atomic store if a specific platform
// does not provide a specialization for a certain size class.
// For increased safety, the default implementation only allows
// storing types that are pointer sized or smaller. If a platform still
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformStore for that wider size class.
template<size_t byte_size>
struct Atomic::PlatformStore {
  template<typename T>
  void operator()(T volatile* dest,
                  T new_value) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    (void)const_cast<T&>(*dest = new_value);
  }
};

template<typename D>
inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  Atomic::add(dest, I(1), order);
}

template<typename D>
inline void Atomic::dec(D volatile* dest, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  // Assumes two's complement integer representation.
#pragma warning(suppress: 4146)
  Atomic::add(dest, I(-1), order);
}

template<typename D, typename I>
inline D Atomic::sub(D volatile* dest, I sub_value, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  STATIC_ASSERT(IsIntegral<I>::value);
  // If D is a pointer type, use [u]intptr_t as the addend type,
  // matching signedness of I. Otherwise, use D as the addend type.
  typedef typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type PI;
  typedef typename Conditional<IsPointer<D>::value, PI, D>::type AddendType;
  // Only allow conversions that can't change the value.
  STATIC_ASSERT(IsSigned<I>::value == IsSigned<AddendType>::value);
  STATIC_ASSERT(sizeof(I) <= sizeof(AddendType));
  AddendType addend = sub_value;
  // Assumes two's complement integer representation.
#pragma warning(suppress: 4146) // In case AddendType is not signed.
  return Atomic::add(dest, -addend, order);
}

// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of specializations
// of the operator template are provided, nor are there any generic
// specializations of the class. The platform file is responsible for
// providing those.
template<size_t byte_size>
struct Atomic::PlatformCmpxchg {
  template<typename T>
  T operator()(T volatile* dest,
               T compare_value,
               T exchange_value,
               atomic_memory_order order) const;
};
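
// Illustrative sketch (editorial note, not part of the original header): a
// platform file included below would typically supply the declared operator
// for each required size, for example via a compiler intrinsic such as
// GCC's __sync_val_compare_and_swap, or fully specialize the class instead.
//
//   template<>
//   template<typename T>
//   inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
//                                                   T compare_value,
//                                                   T exchange_value,
//                                                   atomic_memory_order order) const {
//     STATIC_ASSERT(4 == sizeof(T));
//     return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
//   }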

// Define the class before including platform file, which may use this
// as a base class, requiring it be complete. The definition is later
// in this file, near the other definitions related to cmpxchg.
struct Atomic::CmpxchgByteUsingInt {
  static uint8_t get_byte_in_int(uint32_t n, uint32_t idx);
  static uint32_t set_byte_in_int(uint32_t n, uint8_t b, uint32_t idx);
  template<typename T>
  T operator()(T volatile* dest,
               T compare_value,
               T exchange_value,
               atomic_memory_order order) const;
};

// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of specializations
// of the operator template are provided, nor are there any generic
// specializations of the class. The platform file is responsible for
// providing those.
template<size_t byte_size>
struct Atomic::PlatformXchg {
  template<typename T>
  T operator()(T volatile* dest,
               T exchange_value,
               atomic_memory_order order) const;
};

template <ScopedFenceType T>
class ScopedFenceGeneral: public StackObj {
public:
  void prefix() {}
  void postfix() {}
};

// The following methods can be specialized using simple template specialization
// in the platform specific files for optimization purposes. Otherwise the
// generalized variant is used.

template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix() { OrderAccess::acquire(); }
template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix() { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix() { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }

template <ScopedFenceType T>
class ScopedFence : public ScopedFenceGeneral<T> {
  void *const _field;
public:
  ScopedFence(void *const field) : _field(field) { prefix(); }
  ~ScopedFence() { postfix(); }
  void prefix() { ScopedFenceGeneral<T>::prefix(); }
  void postfix() { ScopedFenceGeneral<T>::postfix(); }
};

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

// size_t casts...
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

template<typename T>
inline T Atomic::load(const volatile T* dest) {
  return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
}

template<size_t byte_size, ScopedFenceType type>
struct Atomic::PlatformOrderedLoad {
  template <typename T>
  T operator()(const volatile T* p) const {
    ScopedFence<type> f((void*)p);
    return Atomic::load(p);
  }
};

template <typename T>
inline T Atomic::load_acquire(const volatile T* p) {
  return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
}

template<typename D, typename T>
inline void Atomic::store(volatile D* dest, T store_value) {
  StoreImpl<D, T, PlatformStore<sizeof(D)> >()(dest, store_value);
}

template<size_t byte_size, ScopedFenceType type>
struct Atomic::PlatformOrderedStore {
  template <typename T>
  void operator()(volatile T* p, T v) const {
    ScopedFence<type> f((void*)p);
    Atomic::store(p, v);
  }
};

template <typename D, typename T>
inline void Atomic::release_store(volatile D* p, T v) {
  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(p, v);
}

template <typename D, typename T>
inline void Atomic::release_store_fence(volatile D* p, T v) {
  StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v);
}
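
// Illustrative usage (editorial sketch, not part of the original header;
// _data and _ready are hypothetical): release_store() pairs with
// load_acquire() to publish data from one thread to another.
//
//   static int _data = 0;
//   static volatile bool _ready = false;
//
//   // Publisher thread:
//   _data = 42;                            // plain store
//   Atomic::release_store(&_ready, true);  // release: _data is visible before _ready
//
//   // Consumer thread:
//   if (Atomic::load_acquire(&_ready)) {   // acquire: pairs with the release above
//     int v = _data;                       // guaranteed to observe 42
//   }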

template<typename D, typename I>
inline D Atomic::add(D volatile* dest, I add_value,
                     atomic_memory_order order) {
  return AddImpl<D, I>::add_and_fetch(dest, add_value, order);
}

template<typename D, typename I>
inline D Atomic::fetch_and_add(D volatile* dest, I add_value,
                               atomic_memory_order order) {
  return AddImpl<D, I>::fetch_and_add(dest, add_value, order);
}

template<typename D, typename I>
struct Atomic::AddImpl<
  D, I,
  typename EnableIf<IsIntegral<I>::value &&
                    IsIntegral<D>::value &&
                    (sizeof(I) <= sizeof(D)) &&
                    (IsSigned<I>::value == IsSigned<D>::value)>::type>
{
  static D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) {
    D addend = add_value;
    return PlatformAdd<sizeof(D)>().add_and_fetch(dest, addend, order);
  }
  static D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) {
    D addend = add_value;
    return PlatformAdd<sizeof(D)>().fetch_and_add(dest, addend, order);
  }
};

template<typename P, typename I>
struct Atomic::AddImpl<
  P*, I,
  typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
{
  STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
  STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
  typedef typename Conditional<IsSigned<I>::value,
                               intptr_t,
                               uintptr_t>::type CI;

  static CI scale_addend(CI add_value) {
    return add_value * sizeof(P);
  }

  static P* add_and_fetch(P* volatile* dest, I add_value, atomic_memory_order order) {
    CI addend = add_value;
    return PlatformAdd<sizeof(P*)>().add_and_fetch(dest, scale_addend(addend), order);
  }
  static P* fetch_and_add(P* volatile* dest, I add_value, atomic_memory_order order) {
    CI addend = add_value;
    return PlatformAdd<sizeof(P*)>().fetch_and_add(dest, scale_addend(addend), order);
  }
};
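
// Illustrative note (editorial, not part of the original header): because the
// pointer specialization above scales the addend by sizeof(P), adding 1 to a
// T* advances it by one element, mirroring ordinary pointer arithmetic.
//
//   static int* volatile _cursor = /* ... */;
//   int* next = Atomic::add(&_cursor, 1); // _cursor moved by sizeof(int) bytes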

template<typename Type, typename Fn, typename D, typename I>
inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
  return PrimitiveConversions::cast<D>(
    fn(PrimitiveConversions::cast<Type>(add_value),
       reinterpret_cast<Type volatile*>(dest)));
}

template<typename D, typename U, typename T>
inline D Atomic::cmpxchg(D volatile* dest,
                         U compare_value,
                         T exchange_value,
                         atomic_memory_order order) {
  return CmpxchgImpl<D, U, T>()(dest, compare_value, exchange_value, order);
}

template<typename D, typename T>
inline bool Atomic::replace_if_null(D* volatile* dest, T* value,
                                    atomic_memory_order order) {
  // Presently using a trivial implementation in terms of cmpxchg.
  // Consider adding platform support, to permit the use of compiler
  // intrinsics like gcc's __sync_bool_compare_and_swap.
  D* expected_null = NULL;
  return expected_null == cmpxchg(dest, expected_null, value, order);
}

// Handle cmpxchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value>::type>
{
  T operator()(T volatile* dest, T compare_value, T exchange_value,
               atomic_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformCmpxchg<sizeof(T)>()(dest,
                                        compare_value,
                                        exchange_value,
                                        order);
  }
};

// Handle cmpxchg for pointer types.
//
// The destination's type and the compare_value type must be the same,
// ignoring cv-qualifiers; we don't care about the cv-qualifiers of
// the compare_value.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename D, typename U, typename T>
struct Atomic::CmpxchgImpl<
  D*, U*, T*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
                    IsSame<typename RemoveCV<D>::type,
                           typename RemoveCV<U>::type>::value>::type>
{
  D* operator()(D* volatile* dest, U* compare_value, T* exchange_value,
                atomic_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    // Don't care what the CV qualifiers for compare_value are,
    // but we need to match D* when calling platform support.
    D* old_value = const_cast<D*>(compare_value);
    return PlatformCmpxchg<sizeof(D*)>()(dest, old_value, new_value, order);
  }
};

// Handle cmpxchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T volatile* dest, T compare_value, T exchange_value,
               atomic_memory_order order) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      cmpxchg(reinterpret_cast<Decayed volatile*>(dest),
              Translator::decay(compare_value),
              Translator::decay(exchange_value),
              order));
  }
};

template<typename Type, typename Fn, typename T>
inline T Atomic::cmpxchg_using_helper(Fn fn,
                                      T volatile* dest,
                                      T compare_value,
                                      T exchange_value) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest),
       PrimitiveConversions::cast<Type>(compare_value)));
}

inline uint32_t Atomic::CmpxchgByteUsingInt::set_byte_in_int(uint32_t n,
                                                             uint8_t b,
                                                             uint32_t idx) {
  int bitsIdx = BitsPerByte * idx;
  return (n & ~(static_cast<uint32_t>(0xff) << bitsIdx))
       | (static_cast<uint32_t>(b) << bitsIdx);
}

inline uint8_t Atomic::CmpxchgByteUsingInt::get_byte_in_int(uint32_t n,
                                                            uint32_t idx) {
  int bitsIdx = BitsPerByte * idx;
  return (uint8_t)(n >> bitsIdx);
}

template<typename T>
inline T Atomic::CmpxchgByteUsingInt::operator()(T volatile* dest,
                                                 T compare_value,
                                                 T exchange_value,
                                                 atomic_memory_order order) const {
  STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
  uint8_t canon_exchange_value = exchange_value;
  uint8_t canon_compare_value = compare_value;
  volatile uint32_t* aligned_dest
    = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
  size_t offset = pointer_delta(dest, aligned_dest, 1);

  uint32_t idx = (Endian::NATIVE == Endian::BIG)
                   ? (sizeof(uint32_t) - 1 - offset)
                   : offset;

  // current value may not be what we are looking for, so force it
  // to that value so the initial cmpxchg will fail if it is different
  uint32_t cur = set_byte_in_int(Atomic::load(aligned_dest), canon_compare_value, idx);

  // always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure
  do {
    // value to swap in matches current value
    // except for the one byte we want to update
    uint32_t new_value = set_byte_in_int(cur, canon_exchange_value, idx);

    uint32_t res = cmpxchg(aligned_dest, cur, new_value, order);
    if (res == cur) break; // success

    // at least one byte in the int changed value, so update
    // our view of the current int
    cur = res;
    // if our byte is still as cur we loop and try again
  } while (get_byte_in_int(cur, idx) == canon_compare_value);

  return PrimitiveConversions::cast<T>(get_byte_in_int(cur, idx));
}
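
// Worked example (editorial note, not part of the original header): on a
// little-endian platform, a byte at address aligned_dest + 2 has idx == 2,
// i.e. it occupies bits [16, 24) of the containing word, so
// set_byte_in_int(n, b, 2) computes exactly
//   (n & ~0x00ff0000) | (uint32_t(b) << 16)
// while the other three bytes pass through the 4-byte cmpxchg unchanged.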

// Handle xchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<IsIntegral<T>::value || std::is_enum<T>::value>::type>
{
  T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformXchg<sizeof(T)>()(dest, exchange_value, order);
  }
};

// Handle xchg for pointer types.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename D, typename T>
struct Atomic::XchgImpl<
  D*, T*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
{
  D* operator()(D* volatile* dest, T* exchange_value, atomic_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    return PlatformXchg<sizeof(D*)>()(dest, new_value, order);
  }
};

// Handle xchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      xchg(reinterpret_cast<Decayed volatile*>(dest),
           Translator::decay(exchange_value),
           order));
  }
};

template<typename Type, typename Fn, typename T>
inline T Atomic::xchg_using_helper(Fn fn,
                                   T volatile* dest,
                                   T exchange_value) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  // Notice the swapped order of arguments. Change when/if stubs are rewritten.
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest)));
}

template<typename D, typename T>
inline D Atomic::xchg(volatile D* dest, T exchange_value, atomic_memory_order order) {
  return XchgImpl<D, T>()(dest, exchange_value, order);
}

#endif // SHARE_RUNTIME_ATOMIC_HPP