/*
 * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_ACCESSBACKEND_HPP
#define SHARE_OOPS_ACCESSBACKEND_HPP

#include "gc/shared/barrierSetConfig.hpp"
#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/decay.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/integralConstant.hpp"
#include "metaprogramming/isFloatingPoint.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/isVolatile.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

// This metafunction returns either oop or narrowOop depending on whether
// an access needs to use compressed oops or not.
template <DecoratorSet decorators>
struct HeapOopType: AllStatic {
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
                                         HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  typedef typename Conditional<needs_oop_compress, narrowOop, oop>::type type;
};
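
// For example (an illustrative sketch, not part of the original header): with
// both compressed-oop decorators set, the heap oop type selects narrowOop, and
// without them it stays oop. The checks are kept commented out so they have no
// effect on the build.
//
//   STATIC_ASSERT((IsSame<HeapOopType<INTERNAL_CONVERT_COMPRESSED_OOP |
//                                     INTERNAL_RT_USE_COMPRESSED_OOPS>::type,
//                         narrowOop>::value));
//   STATIC_ASSERT((IsSame<HeapOopType<DECORATORS_NONE>::type, oop>::value));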

namespace AccessInternal {
  enum BarrierType {
    BARRIER_STORE,
    BARRIER_STORE_AT,
    BARRIER_LOAD,
    BARRIER_LOAD_AT,
    BARRIER_ATOMIC_CMPXCHG,
    BARRIER_ATOMIC_CMPXCHG_AT,
    BARRIER_ATOMIC_XCHG,
    BARRIER_ATOMIC_XCHG_AT,
    BARRIER_ARRAYCOPY,
    BARRIER_CLONE,
    BARRIER_RESOLVE
  };

  template <DecoratorSet decorators, typename T>
  struct MustConvertCompressedOop: public IntegralConstant<bool,
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    IsSame<typename HeapOopType<decorators>::type, narrowOop>::value &&
    IsSame<T, oop>::value> {};

  // This metafunction returns an appropriate oop type if the value is oop-like
  // and otherwise returns the same type T.
  template <DecoratorSet decorators, typename T>
  struct EncodedType: AllStatic {
    typedef typename Conditional<
      HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
      typename HeapOopType<decorators>::type, T>::type type;
  };
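
  // For instance (illustrative only): for an oop value under both
  // compressed-oop decorators, EncodedType yields narrowOop; for a plain
  // primitive such as jint it simply yields jint again.
  //
  //   EncodedType<INTERNAL_VALUE_IS_OOP |
  //               INTERNAL_CONVERT_COMPRESSED_OOP |
  //               INTERNAL_RT_USE_COMPRESSED_OOPS, oop>::type  // narrowOop
  //   EncodedType<DECORATORS_NONE, jint>::type                 // jint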

  template <DecoratorSet decorators>
  inline typename HeapOopType<decorators>::type*
  oop_field_addr(oop base, ptrdiff_t byte_offset) {
    return reinterpret_cast<typename HeapOopType<decorators>::type*>(
             reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  }

  // This metafunction returns whether a type T may require locking to
  // support wide atomics.
  template <typename T>
#ifdef SUPPORTS_NATIVE_CX8
  struct PossiblyLockedAccess: public IntegralConstant<bool, false> {};
#else
  struct PossiblyLockedAccess: public IntegralConstant<bool, (sizeof(T) > 4)> {};
#endif
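
  // Concretely (an illustrative note, not in the original header): on a
  // platform without a native 8-byte compare-and-swap (SUPPORTS_NATIVE_CX8
  // undefined), an 8-byte value such as jlong satisfies sizeof(T) > 4, so its
  // atomic accesses may take the mutex-guarded path declared further below.
  //
  //   PossiblyLockedAccess<jlong>::value  // true on such platforms
  //   PossiblyLockedAccess<jint>::value   // always false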

  template <DecoratorSet decorators, typename T>
  struct AccessFunctionTypes {
    typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
    typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
    typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
    typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);

    typedef T (*load_func_t)(void* addr);
    typedef void (*store_func_t)(void* addr, T value);
    typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
    typedef T (*atomic_xchg_func_t)(void* addr, T new_value);

    typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                     size_t length);
    typedef void (*clone_func_t)(oop src, oop dst, size_t size);
    typedef oop (*resolve_func_t)(oop obj);
  };

  template <DecoratorSet decorators>
  struct AccessFunctionTypes<decorators, void> {
    typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
                                     size_t length);
  };

  template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};

#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
  template <DecoratorSet decorators, typename T>                    \
  struct AccessFunction<decorators, T, bt>: AllStatic {             \
    typedef typename AccessFunctionTypes<decorators, T>::func type; \
  }
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_RESOLVE, resolve_func_t);
#undef ACCESS_GENERATE_ACCESS_FUNCTION

  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();

  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();

  class AccessLocker {
  public:
    AccessLocker();
    ~AccessLocker();
  };
  bool wide_atomic_needs_locking();

  void* field_addr(oop base, ptrdiff_t offset);

  // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  // faster build times, given how frequently access is included.
  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
  void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
  void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);

  void arraycopy_disjoint_words(void* src, void* dst, size_t length);
  void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);

  template<typename T>
  void arraycopy_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);
}

// This mask specifies what decorators are relevant for raw accesses. When passing
// accesses to the raw layer, irrelevant decorators are removed.
const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
                                        ARRAYCOPY_DECORATOR_MASK | IS_NOT_NULL;

// The RawAccessBarrier performs raw accesses with additional knowledge of
// memory ordering, so that OrderAccess/Atomic is called when necessary.
// It additionally handles compressed oops, and hence is not completely "raw",
// strictly speaking.
template <DecoratorSet decorators>
class RawAccessBarrier: public AllStatic {
protected:
  static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return AccessInternal::field_addr(base, byte_offset);
  }

protected:
  // Only encode if INTERNAL_VALUE_IS_OOP
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
    typename HeapOopType<idecorators>::type>::type
  encode_internal(T value);

  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  encode_internal(T value) {
    return value;
  }

  template <typename T>
  static inline typename AccessInternal::EncodedType<decorators, T>::type
  encode(T value) {
    return encode_internal<decorators, T>(value);
  }

  // Only decode if INTERNAL_VALUE_IS_OOP
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(typename HeapOopType<idecorators>::type value);

  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(T value) {
    return value;
  }

  template <typename T>
  static inline T decode(typename AccessInternal::EncodedType<decorators, T>::type value) {
    return decode_internal<decorators, T>(value);
  }

protected:
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_ACQUIRE>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<T*>(addr);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELEASE>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value>::type
  store_internal(void* addr, T value) {
    *reinterpret_cast<T*>(addr) = value;
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_xchg_internal(void* addr, T new_value);

  // The following *_locked mechanisms handle atomic operations that are wider
  // than the machine supports natively, possibly falling back to a slower,
  // mutex-guarded path to perform the operation.

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(void* addr, T compare_value, T new_value) {
    return atomic_cmpxchg_internal<ds>(addr, compare_value, new_value);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(void* addr, T compare_value, T new_value);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(void* addr, T new_value) {
    return atomic_xchg_internal<ds>(addr, new_value);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(void* addr, T new_value);

public:
  template <typename T>
  static inline void store(void* addr, T value) {
    store_internal<decorators>(addr, value);
  }

  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  template <typename T>
  static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
    return atomic_cmpxchg_maybe_locked<decorators>(addr, compare_value, new_value);
  }

  template <typename T>
  static inline T atomic_xchg(void* addr, T new_value) {
    return atomic_xchg_maybe_locked<decorators>(addr, new_value);
  }

  template <typename T>
  static bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length);

  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);

  template <typename T>
  static T oop_atomic_xchg(void* addr, T new_value);
  template <typename T>
  static T oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value);

  template <typename T>
  static void store_at(oop base, ptrdiff_t offset, T value) {
    store(field_addr(base, offset), value);
  }

  template <typename T>
  static T load_at(oop base, ptrdiff_t offset) {
    return load<T>(field_addr(base, offset));
  }

  template <typename T>
  static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
    return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
  }

  template <typename T>
  static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    return atomic_xchg(field_addr(base, offset), new_value);
  }

  template <typename T>
  static bool oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                            size_t length);

  static void clone(oop src, oop dst, size_t size);

  static oop resolve(oop obj) { return obj; }
};

// Below is the implementation of the first 4 steps of the template pipeline:
// * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
//           and sets default decorators to sensible values.
// * Step 2: Reduce types. This step makes sure there is only a single T type and not
//           multiple types. The P type of the address and T type of the value must
//           match.
// * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
//           avoided, and in that case avoids it (calling raw accesses or
//           primitive accesses in a build that does not require primitive GC barriers).
// * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
//           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
//           to the access.
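
// As a concrete illustration (a sketch, not code from this header): a relaxed
// primitive load travels through the steps roughly as
//
//   load<MO_RELAXED, jint*, jint>(addr)        // Step 1: decay types, fix up decorators
//     -> load_reduce_types<expanded>(addr)     // Step 2: unify address/value types
//       -> PreRuntimeDispatch::load<...>(addr) // Step 3: hardwire if AS_RAW/primitive
//         -> RawAccessBarrier<...>::load(addr) // or Step 4: RuntimeDispatch otherwise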

namespace AccessInternal {
  template <typename T>
  struct OopOrNarrowOopInternal: AllStatic {
    typedef oop type;
  };

  template <>
  struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
    typedef narrowOop type;
  };

  // This metafunction returns a canonicalized oop/narrowOop type for the
  // oop-like types passed in from the oop_* overloads, where the user has sworn
  // that the passed in values should be oop-like (e.g. oop, oopDesc*, arrayOop,
  // narrowOop, instanceOopDesc*, and random other things).
  // In the oop_* overloads, it must hold that if the passed in type T is not
  // narrowOop, then it by contract has to be one of many oop-like types implicitly
  // convertible to oop, and hence returns oop as the canonical oop type.
  // If it turns out it was not, then the implicit conversion to oop will fail
  // to compile, as desired.
  template <typename T>
  struct OopOrNarrowOop: AllStatic {
    typedef typename OopOrNarrowOopInternal<typename Decay<T>::type>::type type;
  };
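
  // For example (illustrative only): any oop-like type canonicalizes to oop,
  // while narrowOop stays narrowOop.
  //
  //   OopOrNarrowOop<arrayOop>::type          // oop
  //   OopOrNarrowOop<const narrowOop&>::type  // narrowOop (after Decay)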

  inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  }

  // Step 4: Runtime dispatch
  // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
  // accessor. This is required when the access either depends on whether compressed oops
  // are being used, or on which GC implementation was chosen (e.g. requires GC
  // barriers). The way it works is that a function pointer initially pointing to an
  // accessor resolution function gets called for each access. Upon first invocation,
  // it resolves which accessor is to be used in future invocations and patches the
  // function pointer to this new accessor.
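
  // A minimal standalone sketch (illustrative, not part of HotSpot) of this
  // self-patching function-pointer idiom, kept commented out:
  //
  //   static int resolved(int x) { return x + 1; }  // the accessor picked at runtime
  //   static int init(int x);                       // resolver, installed first
  //   static int (*_func)(int) = &init;
  //   static int init(int x) {
  //     _func = &resolved;   // patch so future calls skip resolution
  //     return resolved(x);  // finish the current call through the resolved accessor
  //   }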

  template <DecoratorSet decorators, typename T, BarrierType type>
  struct RuntimeDispatch: AllStatic {};

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
    static func_t _store_func;

    static void store_init(void* addr, T value);

    static inline void store(void* addr, T value) {
      _store_func(addr, value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
    static func_t _store_at_func;

    static void store_at_init(oop base, ptrdiff_t offset, T value);

    static inline void store_at(oop base, ptrdiff_t offset, T value) {
      _store_at_func(base, offset, value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
    static func_t _load_func;

    static T load_init(void* addr);

    static inline T load(void* addr) {
      return _load_func(addr);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
    static func_t _load_at_func;

    static T load_at_init(oop base, ptrdiff_t offset);

    static inline T load_at(oop base, ptrdiff_t offset) {
      return _load_at_func(base, offset);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
    static func_t _atomic_cmpxchg_func;

    static T atomic_cmpxchg_init(void* addr, T compare_value, T new_value);

    static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      return _atomic_cmpxchg_func(addr, compare_value, new_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
    static func_t _atomic_cmpxchg_at_func;

    static T atomic_cmpxchg_at_init(oop base, ptrdiff_t offset, T compare_value, T new_value);

    static inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      return _atomic_cmpxchg_at_func(base, offset, compare_value, new_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
    static func_t _atomic_xchg_func;

    static T atomic_xchg_init(void* addr, T new_value);

    static inline T atomic_xchg(void* addr, T new_value) {
      return _atomic_xchg_func(addr, new_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
    static func_t _atomic_xchg_at_func;

    static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);

    static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
      return _atomic_xchg_at_func(base, offset, new_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
    static func_t _arraycopy_func;

    static bool arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                               size_t length);

    static inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                 size_t length) {
      return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
                             dst_obj, dst_offset_in_bytes, dst_raw,
                             length);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
    static func_t _clone_func;

    static void clone_init(oop src, oop dst, size_t size);

    static inline void clone(oop src, oop dst, size_t size) {
      _clone_func(src, dst, size);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_RESOLVE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type func_t;
    static func_t _resolve_func;

    static oop resolve_init(oop obj);

    static inline oop resolve(oop obj) {
      return _resolve_func(obj);
    }
  };

  // Initialize the function pointers to point to the resolving function.
  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type
  RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::_resolve_func = &resolve_init;

  // Step 3: Pre-runtime dispatching.
  // The PreRuntimeDispatch class is responsible for filtering the barrier strength
  // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
  // dispatch point. Otherwise it goes through a runtime check if hardwiring was
  // not possible.
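
  // In other words (an illustrative note): an access carrying AS_RAW compiles
  // straight down to RawAccessBarrier with no function pointer involved,
  // whereas e.g. an oop access through the heap-access API in access.hpp must
  // go through RuntimeDispatch above, because the required GC barriers are
  // only known at runtime.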

  struct PreRuntimeDispatch: AllStatic {
    template<DecoratorSet decorators>
    struct CanHardwireRaw: public IntegralConstant<
      bool,
      !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
      !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
      HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
    {};

    static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;

    template<DecoratorSet decorators>
    static bool is_hardwired_primitive() {
      return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type
    store(void* addr, T value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        Raw::oop_store(addr, value);
      } else {
        Raw::store(addr, value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type
    store(void* addr, T value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    store(void* addr, T value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      store<decorators>(field_addr(base, offset), value);
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::template oop_load<T>(addr);
      } else {
        return Raw::template load<T>(addr);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load(void* addr) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      return load<decorators, T>(field_addr(base, offset));
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
      } else {
        return Raw::atomic_cmpxchg(addr, compare_value, new_value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(addr, compare_value, new_value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      return atomic_cmpxchg<decorators>(field_addr(base, offset), compare_value, new_value);
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(base, offset, compare_value, new_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(base, offset, compare_value, new_value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    atomic_xchg(void* addr, T new_value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_atomic_xchg(addr, new_value);
      } else {
        return Raw::atomic_xchg(addr, new_value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    atomic_xchg(void* addr, T new_value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_xchg(void* addr, T new_value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(addr, new_value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
      return atomic_xchg<decorators>(field_addr(base, offset), new_value);
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
    arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
              arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
              size_t length) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
                                  dst_obj, dst_offset_in_bytes, dst_raw,
                                  length);
      } else {
        return Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                              dst_obj, dst_offset_in_bytes, dst_raw,
                              length);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, bool>::type
    arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
              arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
              size_t length) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                                  dst_obj, dst_offset_in_bytes, dst_raw,
                                                                  length);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                                  dst_obj, dst_offset_in_bytes, dst_raw,
                                                                  length);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, bool>::type
    arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
              arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
              size_t length) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                                  dst_obj, dst_offset_in_bytes, dst_raw,
                                                                  length);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                                                                            dst_obj, dst_offset_in_bytes, dst_raw,
                                                                            length);
      }
    }

    template <DecoratorSet decorators>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value>::type
    clone(oop src, oop dst, size_t size) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      Raw::clone(src, dst, size);
    }

    template <DecoratorSet decorators>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    clone(oop src, oop dst, size_t size) {
      RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
    }
  };

  // Step 2: Reduce types.
  // Enforce that for non-oop types, T and P have to be strictly the same.
  // P is the type of the address and T is the type of the values.
  // As for oop types, it is allowed to send T in {narrowOop, oop} and
  // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
  // the subsequent table. (columns are P, rows are T)
  //   |           | HeapWord  |   oop   | narrowOop |
  //   |   oop     |  rt-comp  | hw-none |  hw-comp  |
  //   | narrowOop |     x     |    x    |  hw-none  |
  //
  // x means not allowed
  // rt-comp means it must be checked at runtime whether the oop is compressed.
  // hw-none means it is statically known the oop will not be compressed.
  // hw-comp means it is statically known the oop will be compressed.
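
  // For instance (an illustrative reading of the table): storing an oop
  // through a narrowOop* address is the hw-comp case, so the overload below
  // statically adds both compressed-oop decorators, while storing an oop
  // through a HeapWord* address is the rt-comp case and only adds
  // INTERNAL_CONVERT_COMPRESSED_OOP, deferring the UseCompressedOops check
  // to runtime.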

  template <DecoratorSet decorators, typename T>
  inline void store_reduce_types(T* addr, T value) {
    PreRuntimeDispatch::store<decorators>(addr, value);
  }

  template <DecoratorSet decorators>
  inline void store_reduce_types(narrowOop* addr, oop value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
  }

  template <DecoratorSet decorators>
  inline void store_reduce_types(narrowOop* addr, narrowOop value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
  }

  template <DecoratorSet decorators>
  inline void store_reduce_types(HeapWord* addr, oop value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
  }

  template <DecoratorSet decorators, typename T>
  inline T atomic_cmpxchg_reduce_types(T* addr, T compare_value, T new_value) {
    return PreRuntimeDispatch::atomic_cmpxchg<decorators>(addr, compare_value, new_value);
  }

  template <DecoratorSet decorators>
  inline oop atomic_cmpxchg_reduce_types(narrowOop* addr, oop compare_value, oop new_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
  }

  template <DecoratorSet decorators>
  inline narrowOop atomic_cmpxchg_reduce_types(narrowOop* addr, narrowOop compare_value, narrowOop new_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
  }

  template <DecoratorSet decorators>
  inline oop atomic_cmpxchg_reduce_types(HeapWord* addr,
                                         oop compare_value,
                                         oop new_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
  }

  template <DecoratorSet decorators, typename T>
  inline T atomic_xchg_reduce_types(T* addr, T new_value) {
    const DecoratorSet expanded_decorators = decorators;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
  }

  template <DecoratorSet decorators>
  inline oop atomic_xchg_reduce_types(narrowOop* addr, oop new_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
  }

  template <DecoratorSet decorators>
  inline narrowOop atomic_xchg_reduce_types(narrowOop* addr, narrowOop new_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
  }

  template <DecoratorSet decorators>
  inline oop atomic_xchg_reduce_types(HeapWord* addr, oop new_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
  }

  template <DecoratorSet decorators, typename T>
  inline T load_reduce_types(T* addr) {
    return PreRuntimeDispatch::load<decorators, T>(addr);
  }

  template <DecoratorSet decorators, typename T>
  inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
  }

  template <DecoratorSet decorators, typename T>
  inline oop load_reduce_types(HeapWord* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
  }

  template <DecoratorSet decorators, typename T>
  inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                     size_t length) {
    return PreRuntimeDispatch::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                     dst_obj, dst_offset_in_bytes, dst_raw,
                                                     length);
  }

  template <DecoratorSet decorators>
  inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, HeapWord* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, HeapWord* dst_raw,
                                     size_t length) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                              dst_obj, dst_offset_in_bytes, dst_raw,
                                                              length);
  }

  template <DecoratorSet decorators>
  inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw,
                                     size_t length) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                              dst_obj, dst_offset_in_bytes, dst_raw,
                                                              length);
  }

  // Step 1: Set default decorators. This step remembers if a type was volatile
  // and then sets the MO_RELAXED decorator by default. Otherwise, a default
  // memory ordering is set for the access, and the implied decorator rules
  // are applied to select sensible defaults for decorators that have not been
  // explicitly set. For example, default object referent strength is set to strong.
  // This step also decays the types passed in (e.g. getting rid of CV qualifiers
  // and references from the types). This step also performs some type verification
  // that the passed in types make sense.
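
  // As an illustration (not from the original header): a store through a
  // volatile jint* with no explicit memory ordering decorator gets MO_RELAXED
  // folded in by the DecoratorFixup in the overload below, roughly
  //
  //   store<DECORATORS_NONE>(addr, value)  // addr is a volatile jint*
  //   // => expanded_decorators == DecoratorFixup<MO_RELAXED | DECORATORS_NONE>::value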

  template <DecoratorSet decorators, typename T>
  static void verify_types() {
    // If this fails to compile, then you have sent in something that is
    // not recognized as a valid primitive type to a primitive Access function.
    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
                   (IsPointer<T>::value || IsIntegral<T>::value) ||
                   IsFloatingPoint<T>::value)); // not allowed primitive type
  }

  template <DecoratorSet decorators, typename P, typename T>
  inline void store(P* addr, T value) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Decay<T>::type DecayedT;
    DecayedT decayed_value = value;
    // If a volatile address is passed in but no memory ordering decorator,
    // set the memory ordering to MO_RELAXED by default.
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_RELAXED | decorators) : decorators>::value;
    store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
  }

  template <DecoratorSet decorators, typename T>
  inline void store_at(oop base, ptrdiff_t offset, T value) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    DecayedT decayed_value = value;
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
  }

  template <DecoratorSet decorators, typename P, typename T>
  inline T load(P* addr) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                 typename OopOrNarrowOop<T>::type,
                                 typename Decay<T>::type>::type DecayedT;
    // If a volatile address is passed in but no memory ordering decorator,
    // set the memory ordering to MO_RELAXED by default.
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_RELAXED | decorators) : decorators>::value;
    return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
  }

  template <DecoratorSet decorators, typename T>
  inline T load_at(oop base, ptrdiff_t offset) {
    verify_types<decorators, T>();
    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                 typename OopOrNarrowOop<T>::type,
                                 typename Decay<T>::type>::type DecayedT;
    // Expand the decorators (figure out sensible defaults)
    // Potentially remember if we need compressed oop awareness
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
  }

  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_cmpxchg(P* addr, T compare_value, T new_value) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    return atomic_cmpxchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
                                                            compare_decayed_value,
                                                            new_decayed_value);
  }

  template <DecoratorSet decorators, typename T>
  inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    // Determine default memory ordering
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    // Potentially remember that we need compressed oop awareness
    const DecoratorSet final_decorators = expanded_decorators |
                                          (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                           INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE);
    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(base, offset, compare_decayed_value,
                                                                   new_decayed_value);
  }

  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_xchg(P* addr, T new_value) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
    return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
                                                         new_decayed_value);
  }

  template <DecoratorSet decorators, typename T>
  inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
  }

  template <DecoratorSet decorators, typename T>
  inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length) {
    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
                   (IsSame<T, void>::value || IsIntegral<T>::value) ||
                   IsFloatingPoint<T>::value)); // arraycopy allows type erased void elements
    typedef typename Decay<T>::type DecayedT;
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
    return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
                                                       dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
                                                       length);
  }

  template <DecoratorSet decorators>
  inline void clone(oop src, oop dst, size_t size) {
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
    PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
  }

  // Infer the type that should be returned from an Access::oop_load.
  template <typename P, DecoratorSet decorators>
  class OopLoadProxy: public StackObj {
  private:
    P *const _addr;
  public:
    OopLoadProxy(P* addr) : _addr(addr) {}

    inline operator oop() {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
    }

    inline operator narrowOop() {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
    }

    template <typename T>
    inline bool operator ==(const T& other) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
    }

    template <typename T>
    inline bool operator !=(const T& other) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) != other;
    }
  };
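
  // In use (an illustrative sketch): callers never name OopLoadProxy directly;
  // the public Access API in access.hpp returns it, and the assignment target
  // picks the conversion operator, e.g.
  //
  //   oop obj = RawAccess<>::oop_load(addr);        // operator oop()
  //   narrowOop nobj = RawAccess<>::oop_load(addr); // operator narrowOop()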

  // Infer the type that should be returned from an Access::load_at.
  template <DecoratorSet decorators>
  class LoadAtProxy: public StackObj {
  private:
    const oop _base;
    const ptrdiff_t _offset;
  public:
    LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

    template <typename T>
    inline operator T() const {
      return load_at<decorators, T>(_base, _offset);
    }

    template <typename T>
    inline bool operator ==(const T& other) const { return load_at<decorators, T>(_base, _offset) == other; }

    template <typename T>
    inline bool operator !=(const T& other) const { return load_at<decorators, T>(_base, _offset) != other; }
  };

  // Infer the type that should be returned from an Access::oop_load_at.
  template <DecoratorSet decorators>
  class OopLoadAtProxy: public StackObj {
  private:
    const oop _base;
    const ptrdiff_t _offset;
  public:
    OopLoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

    inline operator oop() const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, oop>(_base, _offset);
    }

    inline operator narrowOop() const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, narrowOop>(_base, _offset);
    }

    template <typename T>
    inline bool operator ==(const T& other) const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) == other;
    }

    template <typename T>
    inline bool operator !=(const T& other) const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) != other;
    }
  };
}

#endif // SHARE_OOPS_ACCESSBACKEND_HPP