Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/gc/z/zBarrier.inline.hpp
40957 views
1
/*
2
* Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*/
23
24
#ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
25
#define SHARE_GC_Z_ZBARRIER_INLINE_HPP
26
27
#include "gc/z/zBarrier.hpp"
28
29
#include "gc/z/zAddress.inline.hpp"
30
#include "gc/z/zOop.inline.hpp"
31
#include "gc/z/zResurrection.inline.hpp"
32
#include "oops/oop.hpp"
33
#include "runtime/atomic.hpp"
34
35
// A self heal must always "upgrade" the address metadata bits in
36
// accordance with the metadata bits state machine, which has the
37
// valid state transitions as described below (where N is the GC
38
// cycle).
39
//
40
// Note the subtleness of overlapping GC cycles. Specifically that
41
// oops are colored Remapped(N) starting at relocation N and ending
42
// at marking N + 1.
43
//
44
// +--- Mark Start
45
// | +--- Mark End
46
// | | +--- Relocate Start
47
// | | | +--- Relocate End
48
// | | | |
49
// Marked |---N---|--N+1--|--N+2--|----
50
// Finalizable |---N---|--N+1--|--N+2--|----
51
// Remapped ----|---N---|--N+1--|--N+2--|
52
//
53
// VALID STATE TRANSITIONS
54
//
55
// Marked(N) -> Remapped(N)
56
// -> Marked(N + 1)
57
// -> Finalizable(N + 1)
58
//
59
// Finalizable(N) -> Marked(N)
60
// -> Remapped(N)
61
// -> Marked(N + 1)
62
// -> Finalizable(N + 1)
63
//
64
// Remapped(N) -> Marked(N + 1)
65
// -> Finalizable(N + 1)
66
//
67
// PHASE VIEW
68
//
69
// ZPhaseMark
70
// Load & Mark
71
// Marked(N) <- Marked(N - 1)
72
// <- Finalizable(N - 1)
73
// <- Remapped(N - 1)
74
// <- Finalizable(N)
75
//
76
// Mark(Finalizable)
77
// Finalizable(N) <- Marked(N - 1)
78
// <- Finalizable(N - 1)
79
// <- Remapped(N - 1)
80
//
81
// Load(AS_NO_KEEPALIVE)
82
// Remapped(N - 1) <- Marked(N - 1)
83
// <- Finalizable(N - 1)
84
//
85
// ZPhaseMarkCompleted (Resurrection blocked)
86
// Load & Load(ON_WEAK/PHANTOM_OOP_REF | AS_NO_KEEPALIVE) & KeepAlive
87
// Marked(N) <- Marked(N - 1)
88
// <- Finalizable(N - 1)
89
// <- Remapped(N - 1)
90
// <- Finalizable(N)
91
//
92
// Load(ON_STRONG_OOP_REF | AS_NO_KEEPALIVE)
93
// Remapped(N - 1) <- Marked(N - 1)
94
// <- Finalizable(N - 1)
95
//
96
// ZPhaseMarkCompleted (Resurrection unblocked)
97
// Load
98
// Marked(N) <- Finalizable(N)
99
//
100
// ZPhaseRelocate
101
// Load & Load(AS_NO_KEEPALIVE)
102
// Remapped(N) <- Marked(N)
103
// <- Finalizable(N)
104
105
// Atomically "self heal" the oop location *p: replace the bad address addr
// with heal_addr, retrying if another thread healed it first with a value
// that this barrier's fast path still considers bad (i.e. weaker metadata
// bits, per the state machine described above).
template <ZBarrierFastPath fast_path>
inline void ZBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr) {
  if (heal_addr == 0) {
    // Never heal with null since it interacts badly with reference processing.
    // A mutator clearing an oop would be similar to calling Reference.clear(),
    // which would make the reference non-discoverable or silently dropped
    // by the reference processor.
    return;
  }

  // The value we replace must be bad for this barrier, and the value we
  // install must be good for it.
  assert(!fast_path(addr), "Invalid self heal");
  assert(fast_path(heal_addr), "Invalid self heal");

  for (;;) {
    // Heal
    const uintptr_t prev_addr = Atomic::cmpxchg((volatile uintptr_t*)p, addr, heal_addr);
    if (prev_addr == addr) {
      // Success
      return;
    }

    if (fast_path(prev_addr)) {
      // Must not self heal
      return;
    }

    // The oop location was healed by another barrier, but still needs upgrading.
    // Re-apply healing to make sure the oop is not left with weaker (remapped or
    // finalizable) metadata bits than what this barrier tried to apply.
    assert(ZAddress::offset(prev_addr) == ZAddress::offset(heal_addr), "Invalid offset");
    addr = prev_addr;
  }
}
138
139
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
140
inline oop ZBarrier::barrier(volatile oop* p, oop o) {
141
const uintptr_t addr = ZOop::to_address(o);
142
143
// Fast path
144
if (fast_path(addr)) {
145
return ZOop::from_address(addr);
146
}
147
148
// Slow path
149
const uintptr_t good_addr = slow_path(addr);
150
151
if (p != NULL) {
152
self_heal<fast_path>(p, addr, good_addr);
153
}
154
155
return ZOop::from_address(good_addr);
156
}
157
158
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
159
inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
160
const uintptr_t addr = ZOop::to_address(o);
161
162
// Fast path
163
if (fast_path(addr)) {
164
// Return the good address instead of the weak good address
165
// to ensure that the currently active heap view is used.
166
return ZOop::from_address(ZAddress::good_or_null(addr));
167
}
168
169
// Slow path
170
const uintptr_t good_addr = slow_path(addr);
171
172
if (p != NULL) {
173
// The slow path returns a good/marked address or null, but we never mark
174
// oops in a weak load barrier so we always heal with the remapped address.
175
self_heal<fast_path>(p, addr, ZAddress::remapped_or_null(good_addr));
176
}
177
178
return ZOop::from_address(good_addr);
179
}
180
181
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
182
inline void ZBarrier::root_barrier(oop* p, oop o) {
183
const uintptr_t addr = ZOop::to_address(o);
184
185
// Fast path
186
if (fast_path(addr)) {
187
return;
188
}
189
190
// Slow path
191
const uintptr_t good_addr = slow_path(addr);
192
193
// Non-atomic healing helps speed up root scanning. This is safe to do
194
// since we are always healing roots in a safepoint, or under a lock,
195
// which ensures we are never racing with mutators modifying roots while
196
// we are healing them. It's also safe in case multiple GC threads try
197
// to heal the same root if it is aligned, since they would always heal
198
// the root in the same way and it does not matter in which order it
199
// happens. For misaligned oops, there needs to be mutual exclusion.
200
*p = ZOop::from_address(good_addr);
201
}
202
203
// Fast-path predicate: delegates to ZAddress::is_good_or_null(), i.e. the
// address is already acceptable for a (strong) load barrier.
inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_good_or_null(addr);
}
206
207
// Fast-path predicate: delegates to ZAddress::is_weak_good_or_null(), used
// by the weak load barrier variants.
inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_weak_good_or_null(addr);
}
210
211
// Fast-path predicate: delegates to ZAddress::is_marked_or_null(), used by
// the finalizable mark barrier.
inline bool ZBarrier::is_marked_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_marked_or_null(addr);
}
214
215
// Returns true while the global GC phase is ZPhaseMark.
inline bool ZBarrier::during_mark() {
  return ZGlobalPhase == ZPhaseMark;
}
218
219
// Returns true while the global GC phase is ZPhaseRelocate.
inline bool ZBarrier::during_relocate() {
  return ZGlobalPhase == ZPhaseRelocate;
}
222
223
//
224
// Load barrier
225
//
226
// Load barrier on a bare oop value (no location to heal — NULL is passed
// as the location pointer).
inline oop ZBarrier::load_barrier_on_oop(oop o) {
  return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
}
229
230
// Load barrier on an oop field: atomically load the current value, then
// apply the preloaded-value barrier against the same location.
inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) {
  return load_barrier_on_oop_field_preloaded(p, Atomic::load(p));
}
234
235
// Load barrier on a field whose value has already been loaded. Fast path:
// good-or-null; slow path: load_barrier_on_oop_slow_path.
inline oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}
238
239
inline void ZBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) {
240
for (volatile const oop* const end = p + length; p < end; p++) {
241
load_barrier_on_oop_field(p);
242
}
243
}
244
245
// Load barrier for a weak reference field (preloaded value). While
// resurrection is blocked the weak slow path is used; otherwise this is an
// ordinary load barrier.
inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (!ZResurrection::is_blocked()) {
    return load_barrier_on_oop_field_preloaded(p, o);
  }

  return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
}
254
255
// Load barrier for a phantom reference field (preloaded value). While
// resurrection is blocked the phantom slow path is used; otherwise this is
// an ordinary load barrier.
inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (!ZResurrection::is_blocked()) {
    return load_barrier_on_oop_field_preloaded(p, o);
  }

  return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
}
262
263
// Load barrier on a root oop location (non-atomic healing, see root_barrier).
inline void ZBarrier::load_barrier_on_root_oop_field(oop* p) {
  root_barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, *p);
}
267
268
// Load barrier on an invisible root oop location, using the dedicated
// invisible-root slow path.
inline void ZBarrier::load_barrier_on_invisible_root_oop_field(oop* p) {
  root_barrier<is_good_or_null_fast_path, load_barrier_on_invisible_root_oop_slow_path>(p, *p);
}
272
273
//
274
// Weak load barrier
275
//
276
// Weak load barrier on an oop field. Only valid while resurrection is not
// blocked (asserted).
inline oop ZBarrier::weak_load_barrier_on_oop_field(volatile oop* p) {
  assert(!ZResurrection::is_blocked(), "Should not be called during resurrection blocked phase");
  return weak_load_barrier_on_oop_field_preloaded(p, Atomic::load(p));
}
281
282
// Weak load barrier on a field whose value has already been loaded. Fast
// path: weak-good-or-null; slow path: weak_load_barrier_on_oop_slow_path.
inline oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
}
285
286
// Weak load barrier on a bare weak-referent oop (no location to heal).
inline oop ZBarrier::weak_load_barrier_on_weak_oop(oop o) {
  return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o);
}
289
290
// Weak load barrier on a weak reference field: load atomically, then apply
// the preloaded-value variant.
inline oop ZBarrier::weak_load_barrier_on_weak_oop_field(volatile oop* p) {
  return weak_load_barrier_on_weak_oop_field_preloaded(p, Atomic::load(p));
}
294
295
// Weak load barrier for a weak reference field (preloaded value). While
// resurrection is blocked the weak slow path is used; otherwise fall back
// to the plain weak load barrier.
inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (!ZResurrection::is_blocked()) {
    return weak_load_barrier_on_oop_field_preloaded(p, o);
  }

  return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
}
304
305
// Weak load barrier on a bare phantom-referent oop (no location to heal).
inline oop ZBarrier::weak_load_barrier_on_phantom_oop(oop o) {
  return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o);
}
308
309
// Weak load barrier on a phantom reference field: load atomically, then
// apply the preloaded-value variant.
inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field(volatile oop* p) {
  return weak_load_barrier_on_phantom_oop_field_preloaded(p, Atomic::load(p));
}
313
314
// Weak load barrier for a phantom reference field (preloaded value). While
// resurrection is blocked the phantom slow path is used; otherwise fall
// back to the plain weak load barrier.
inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (!ZResurrection::is_blocked()) {
    return weak_load_barrier_on_oop_field_preloaded(p, o);
  }

  return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
}
321
322
//
323
// Is alive barrier
324
//
325
// Liveness check for a weak referent: true if the weak load barrier yields
// a non-null oop.
inline bool ZBarrier::is_alive_barrier_on_weak_oop(oop o) {
  // Check if oop is logically non-null. This operation
  // is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  return weak_load_barrier_on_weak_oop(o) != NULL;
}
331
332
// Liveness check for a phantom referent: true if the phantom load barrier
// yields a non-null oop.
inline bool ZBarrier::is_alive_barrier_on_phantom_oop(oop o) {
  // Check if oop is logically non-null. This operation
  // is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  return weak_load_barrier_on_phantom_oop(o) != NULL;
}
338
339
//
340
// Keep alive barrier
341
//
342
inline void ZBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) {
343
// This operation is only valid when resurrection is blocked.
344
assert(ZResurrection::is_blocked(), "Invalid phase");
345
const oop o = Atomic::load(p);
346
barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);
347
}
348
349
inline void ZBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) {
350
// This operation is only valid when resurrection is blocked.
351
assert(ZResurrection::is_blocked(), "Invalid phase");
352
const oop o = Atomic::load(p);
353
barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
354
}
355
356
// Keep-alive barrier on a phantom root oop location (non-atomic healing,
// see root_barrier). Only valid while resurrection is blocked (asserted).
inline void ZBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) {
  assert(ZResurrection::is_blocked(), "Invalid phase");
  root_barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, *p);
}
362
363
// Keep-alive barrier on an already-good oop value. During the mark phase
// the slow path is invoked; outside of marking this is a no-op.
inline void ZBarrier::keep_alive_barrier_on_oop(oop o) {
  const uintptr_t addr = ZOop::to_address(o);
  // Caller must hand us a good address
  assert(ZAddress::is_good(addr), "Invalid address");

  if (during_mark()) {
    keep_alive_barrier_on_oop_slow_path(addr);
  }
}
371
372
//
373
// Mark barrier
374
//
375
inline void ZBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
376
const oop o = Atomic::load(p);
377
378
if (finalizable) {
379
barrier<is_marked_or_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
380
} else {
381
const uintptr_t addr = ZOop::to_address(o);
382
if (ZAddress::is_good(addr)) {
383
// Mark through good oop
384
mark_barrier_on_oop_slow_path(addr);
385
} else {
386
// Mark through bad oop
387
barrier<is_good_or_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
388
}
389
}
390
}
391
392
inline void ZBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
393
for (volatile const oop* const end = p + length; p < end; p++) {
394
mark_barrier_on_oop_field(p, finalizable);
395
}
396
}
397
398
#endif // SHARE_GC_Z_ZBARRIER_INLINE_HPP
399
400