/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/z/c2/zBarrierSetC2.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
#include "opto/compile.hpp"
#include "opto/graphKit.hpp"
#include "opto/machnode.hpp"
#include "opto/macro.hpp"
#include "opto/memnode.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#include "opto/regalloc.hpp"
#include "opto/rootnode.hpp"
#include "opto/type.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"

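// Per-compilation barrier state: collects the load barrier stubs to be
// emitted after the main code, and lazily records, per barrier node, the
// set of registers that are live across the barrier, so that each stub
// only needs to spill registers that are actually live.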
class ZBarrierSetC2State : public ResourceObj {
private:
  GrowableArray<ZLoadBarrierStubC2*>* _stubs;
  Node_Array                          _live;

public:
  ZBarrierSetC2State(Arena* arena) :
    _stubs(new (arena) GrowableArray<ZLoadBarrierStubC2*>(arena, 8, 0, NULL)),
    _live(arena) {}

  GrowableArray<ZLoadBarrierStubC2*>* stubs() {
    return _stubs;
  }

  RegMask* live(const Node* node) {
    if (!node->is_Mach()) {
      // Don't need liveness for non-MachNodes
      return NULL;
    }

    const MachNode* const mach = node->as_Mach();
    if (mach->barrier_data() == ZLoadBarrierElided) {
      // Don't need liveness data for nodes without barriers
      return NULL;
    }

    RegMask* live = (RegMask*)_live[node->_idx];
    if (live == NULL) {
      live = new (Compile::current()->comp_arena()->Amalloc_D(sizeof(RegMask))) RegMask();
      _live.map(node->_idx, (Node*)live);
    }

    return live;
  }
};

static ZBarrierSetC2State* barrier_set_state() {
  return reinterpret_cast<ZBarrierSetC2State*>(Compile::current()->barrier_set_state());
}

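// Stubs are only registered for emission during a real emission pass; while
// sizing code in the scratch buffer (in_scratch_emit_size()) the stub object
// is created but not appended to the state.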
ZLoadBarrierStubC2* ZLoadBarrierStubC2::create(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
  ZLoadBarrierStubC2* const stub = new (Compile::current()->comp_arena()) ZLoadBarrierStubC2(node, ref_addr, ref, tmp, barrier_data);
  if (!Compile::current()->output()->in_scratch_emit_size()) {
    barrier_set_state()->stubs()->append(stub);
  }

  return stub;
}

ZLoadBarrierStubC2::ZLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) :
    _node(node),
    _ref_addr(ref_addr),
    _ref(ref),
    _tmp(tmp),
    _barrier_data(barrier_data),
    _entry(),
    _continuation() {
  assert_different_registers(ref, ref_addr.base());
  assert_different_registers(ref, ref_addr.index());
}

Address ZLoadBarrierStubC2::ref_addr() const {
  return _ref_addr;
}

Register ZLoadBarrierStubC2::ref() const {
  return _ref;
}

Register ZLoadBarrierStubC2::tmp() const {
  return _tmp;
}

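// Translate the stub's barrier data bits into access decorators and resolve
// the matching runtime entry, e.g. ZLoadBarrierWeak selects the
// ON_WEAK_OOP_REF flavor of load_barrier_on_oop_field_preloaded.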
address ZLoadBarrierStubC2::slow_path() const {
  DecoratorSet decorators = DECORATORS_NONE;
  if (_barrier_data & ZLoadBarrierStrong) {
    decorators |= ON_STRONG_OOP_REF;
  }
  if (_barrier_data & ZLoadBarrierWeak) {
    decorators |= ON_WEAK_OOP_REF;
  }
  if (_barrier_data & ZLoadBarrierPhantom) {
    decorators |= ON_PHANTOM_OOP_REF;
  }
  if (_barrier_data & ZLoadBarrierNoKeepalive) {
    decorators |= AS_NO_KEEPALIVE;
  }
  return ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators);
}

RegMask& ZLoadBarrierStubC2::live() const {
  return *barrier_set_state()->live(_node);
}

Label* ZLoadBarrierStubC2::entry() {
  // The _entry will never be bound when in_scratch_emit_size() is true.
  // However, we still need to return a label that is not bound now, but
  // will eventually be bound. Any label will do, as it will only act as
  // a placeholder, so we return the _continuation label.
  return Compile::current()->output()->in_scratch_emit_size() ? &_continuation : &_entry;
}

Label* ZLoadBarrierStubC2::continuation() {
  return &_continuation;
}

void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
  return new (comp_arena) ZBarrierSetC2State(comp_arena);
}

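// Late barrier analysis, run after register allocation: first elide barriers
// made redundant by dominating accesses, then compute the registers live
// across each remaining barrier.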
void ZBarrierSetC2::late_barrier_analysis() const {
  analyze_dominating_barriers();
  compute_liveness_at_stubs();
}

void ZBarrierSetC2::emit_stubs(CodeBuffer& cb) const {
  MacroAssembler masm(&cb);
  GrowableArray<ZLoadBarrierStubC2*>* const stubs = barrier_set_state()->stubs();

  for (int i = 0; i < stubs->length(); i++) {
    // Make sure there is enough space in the code buffer
    if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }

    ZBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i));
  }

  masm.flush();
}

int ZBarrierSetC2::estimate_stub_size() const {
  Compile* const C = Compile::current();
  BufferBlob* const blob = C->output()->scratch_buffer_blob();
  GrowableArray<ZLoadBarrierStubC2*>* const stubs = barrier_set_state()->stubs();
  int size = 0;

  for (int i = 0; i < stubs->length(); i++) {
    CodeBuffer cb(blob->content_begin(), (address)C->output()->scratch_locs_memory() - blob->content_begin());
    MacroAssembler masm(&cb);
    ZBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i));
    size += cb.insts_size();
  }

  return size;
}

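// Decide at parse time which load barrier a heap access needs and record it
// as barrier data on the access, so it can be attached to the resulting
// machine node and consulted by the late barrier analysis.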
static void set_barrier_data(C2Access& access) {
  if (ZBarrierSet::barrier_needed(access.decorators(), access.type())) {
    if (access.decorators() & ON_WEAK_OOP_REF) {
      access.set_barrier_data(ZLoadBarrierWeak);
    } else {
      access.set_barrier_data(ZLoadBarrierStrong);
    }
  }
}

Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  set_barrier_data(access);
  return BarrierSetC2::load_at_resolved(access, val_type);
}

Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                    Node* new_val, const Type* val_type) const {
  set_barrier_data(access);
  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
}

Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                     Node* new_val, const Type* value_type) const {
  set_barrier_data(access);
  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}

Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const {
  set_barrier_data(access);
  return BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
}

bool ZBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type,
                                                    bool is_clone, bool is_clone_instance,
                                                    ArrayCopyPhase phase) const {
  if (phase == ArrayCopyPhase::Parsing) {
    return false;
  }
  if (phase == ArrayCopyPhase::Optimization) {
    return is_clone_instance;
  }
  // else ArrayCopyPhase::Expansion
  return type == T_OBJECT || type == T_ARRAY;
}

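// C2 models a jlong argument as two adjacent slots, with Type::HALF as the
// placeholder for the upper half; hence four domain fields for (src, dst, size).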
// This TypeFunc assumes a 64bit system
static const TypeFunc* clone_type() {
  // Create input type (domain)
  const Type** domain_fields = TypeTuple::fields(4);
  domain_fields[TypeFunc::Parms + 0] = TypeInstPtr::NOTNULL;  // src
  domain_fields[TypeFunc::Parms + 1] = TypeInstPtr::NOTNULL;  // dst
  domain_fields[TypeFunc::Parms + 2] = TypeLong::LONG;        // size lower
  domain_fields[TypeFunc::Parms + 3] = Type::HALF;            // size upper
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + 4, domain_fields);

  // Create result type (range)
  const Type** range_fields = TypeTuple::fields(0);
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 0, range_fields);

  return TypeFunc::make(domain, range);
}

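// Instance clones are expanded into a leaf call to ZBarrierSetRuntime::clone,
// so that oop fields are copied with the required load barriers applied
// rather than block-copied as raw bits; primitive array clones need no such
// help and use the default expansion.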
void ZBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
  Node* const src = ac->in(ArrayCopyNode::Src);
  if (ac->is_clone_array()) {
    // Clone primitive array
    BarrierSetC2::clone_at_expansion(phase, ac);
    return;
  }

  // Clone instance
  Node* const ctrl = ac->in(TypeFunc::Control);
  Node* const mem = ac->in(TypeFunc::Memory);
  Node* const dst = ac->in(ArrayCopyNode::Dest);
  Node* const size = ac->in(ArrayCopyNode::Length);

  assert(ac->is_clone_inst(), "Sanity check");
  assert(size->bottom_type()->is_long(), "Should be long");

  // The native clone we are calling here expects the instance size in words.
  // Add header/offset size to payload size to get instance size.
  Node* const base_offset = phase->longcon(arraycopy_payload_base_offset(false) >> LogBytesPerLong);
  Node* const full_size = phase->transform_later(new AddLNode(size, base_offset));

  Node* const call = phase->make_leaf_call(ctrl,
                                           mem,
                                           clone_type(),
                                           ZBarrierSetRuntime::clone_addr(),
                                           "ZBarrierSetRuntime::clone",
                                           TypeRawPtr::BOTTOM,
                                           src,
                                           dst,
                                           full_size,
                                           phase->top());
  phase->transform_later(call);
  phase->igvn().replace_node(ac, call);
}

// == Dominating barrier elision ==

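// A strong load barrier can be elided if the same address was already
// accessed on every path since the last safepoint: the dominating access
// guarantees the loaded oop is already good. Safepoints invalidate this,
// since the GC phase (and thus what counts as a good pointer) may change
// there, so elision requires that no safepoint can occur between the two
// accesses.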
static bool block_has_safepoint(const Block* block, uint from, uint to) {
  for (uint i = from; i < to; i++) {
    if (block->get_node(i)->is_MachSafePoint()) {
      // Safepoint found
      return true;
    }
  }

  // Safepoint not found
  return false;
}

static bool block_has_safepoint(const Block* block) {
  return block_has_safepoint(block, 0, block->number_of_nodes());
}

static uint block_index(const Block* block, const Node* node) {
  for (uint j = 0; j < block->number_of_nodes(); ++j) {
    if (block->get_node(j) == node) {
      return j;
    }
  }
  ShouldNotReachHere();
  return 0;
}

void ZBarrierSetC2::analyze_dominating_barriers() const {
  ResourceMark rm;
  Compile* const C = Compile::current();
  PhaseCFG* const cfg = C->cfg();
  Block_List worklist;
  Node_List mem_ops;
  Node_List barrier_loads;

  // Step 1 - Find accesses, and track them in lists
  for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
    const Block* const block = cfg->get_block(i);
    for (uint j = 0; j < block->number_of_nodes(); ++j) {
      const Node* const node = block->get_node(j);
      if (!node->is_Mach()) {
        continue;
      }

      MachNode* const mach = node->as_Mach();
      switch (mach->ideal_Opcode()) {
      case Op_LoadP:
        if ((mach->barrier_data() & ZLoadBarrierStrong) != 0) {
          barrier_loads.push(mach);
        }
        if ((mach->barrier_data() & (ZLoadBarrierStrong | ZLoadBarrierNoKeepalive)) ==
            ZLoadBarrierStrong) {
          mem_ops.push(mach);
        }
        break;
      case Op_CompareAndExchangeP:
      case Op_CompareAndSwapP:
      case Op_GetAndSetP:
        if ((mach->barrier_data() & ZLoadBarrierStrong) != 0) {
          barrier_loads.push(mach);
        }
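        // Fallthrough: the atomic accesses above are also memory operations
        // and belong in mem_ops together with stores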
      case Op_StoreP:
        mem_ops.push(mach);
        break;

      default:
        break;
      }
    }
  }

  // Step 2 - Find dominating accesses for each load
  for (uint i = 0; i < barrier_loads.size(); i++) {
    MachNode* const load = barrier_loads.at(i)->as_Mach();
    const TypePtr* load_adr_type = NULL;
    intptr_t load_offset = 0;
    const Node* const load_obj = load->get_base_and_disp(load_offset, load_adr_type);
    Block* const load_block = cfg->get_block_for_node(load);
    const uint load_index = block_index(load_block, load);

    for (uint j = 0; j < mem_ops.size(); j++) {
      MachNode* mem = mem_ops.at(j)->as_Mach();
      const TypePtr* mem_adr_type = NULL;
      intptr_t mem_offset = 0;
      const Node* mem_obj = mem->get_base_and_disp(mem_offset, mem_adr_type);
      Block* mem_block = cfg->get_block_for_node(mem);
      uint mem_index = block_index(mem_block, mem);

      if (load_obj == NodeSentinel || mem_obj == NodeSentinel ||
          load_obj == NULL || mem_obj == NULL ||
          load_offset < 0 || mem_offset < 0) {
        continue;
      }

      if (mem_obj != load_obj || mem_offset != load_offset) {
        // Not the same addresses, not a candidate
        continue;
      }

      if (load_block == mem_block) {
        // Earlier accesses in the same block
        if (mem_index < load_index && !block_has_safepoint(mem_block, mem_index + 1, load_index)) {
          load->set_barrier_data(ZLoadBarrierElided);
        }
      } else if (mem_block->dominates(load_block)) {
        // Dominating block? Look around for safepoints
        ResourceMark rm;
        Block_List stack;
        VectorSet visited;
        stack.push(load_block);
        bool safepoint_found = block_has_safepoint(load_block);
        while (!safepoint_found && stack.size() > 0) {
          Block* block = stack.pop();
          if (visited.test_set(block->_pre_order)) {
            continue;
          }
          if (block_has_safepoint(block)) {
            safepoint_found = true;
            break;
          }
          if (block == mem_block) {
            continue;
          }

          // Push predecessor blocks
          for (uint p = 1; p < block->num_preds(); ++p) {
            Block* pred = cfg->get_block_for_node(block->pred(p));
            stack.push(pred);
          }
        }

        if (!safepoint_found) {
          load->set_barrier_data(ZLoadBarrierElided);
        }
      }
    }
  }
}

// == Reduced spilling optimization ==

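// Classic backward liveness dataflow over the CFG: seed each block with the
// union of its successors' live-in sets, walk the block bottom-up removing
// defs and adding uses, and re-queue predecessors until a fixpoint is
// reached. Each barrier node accumulates the live set as it is computed, so
// its stub can restrict spilling to registers that are actually live.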
void ZBarrierSetC2::compute_liveness_at_stubs() const {
  ResourceMark rm;
  Compile* const C = Compile::current();
  Arena* const A = Thread::current()->resource_area();
  PhaseCFG* const cfg = C->cfg();
  PhaseRegAlloc* const regalloc = C->regalloc();
  RegMask* const live = NEW_ARENA_ARRAY(A, RegMask, cfg->number_of_blocks() * sizeof(RegMask));
  ZBarrierSetAssembler* const bs = ZBarrierSet::assembler();
  Block_List worklist;

  for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
    new ((void*)(live + i)) RegMask();
    worklist.push(cfg->get_block(i));
  }

  while (worklist.size() > 0) {
    const Block* const block = worklist.pop();
    RegMask& old_live = live[block->_pre_order];
    RegMask new_live;

    // Initialize to union of successors
    for (uint i = 0; i < block->_num_succs; i++) {
      const uint succ_id = block->_succs[i]->_pre_order;
      new_live.OR(live[succ_id]);
    }

    // Walk block backwards, computing liveness
    for (int i = block->number_of_nodes() - 1; i >= 0; --i) {
      const Node* const node = block->get_node(i);

      // Remove def bits
      const OptoReg::Name first = bs->refine_register(node, regalloc->get_reg_first(node));
      const OptoReg::Name second = bs->refine_register(node, regalloc->get_reg_second(node));
      if (first != OptoReg::Bad) {
        new_live.Remove(first);
      }
      if (second != OptoReg::Bad) {
        new_live.Remove(second);
      }

      // Add use bits
      for (uint j = 1; j < node->req(); ++j) {
        const Node* const use = node->in(j);
        const OptoReg::Name first = bs->refine_register(use, regalloc->get_reg_first(use));
        const OptoReg::Name second = bs->refine_register(use, regalloc->get_reg_second(use));
        if (first != OptoReg::Bad) {
          new_live.Insert(first);
        }
        if (second != OptoReg::Bad) {
          new_live.Insert(second);
        }
      }

      // If this node tracks liveness, update it
      RegMask* const regs = barrier_set_state()->live(node);
      if (regs != NULL) {
        regs->OR(new_live);
      }
    }

    // Now at block top, see if we have any changes
    new_live.SUBTRACT(old_live);
    if (new_live.is_NotEmpty()) {
      // Liveness has refined, update and propagate to prior blocks
      old_live.OR(new_live);
      for (uint i = 1; i < block->num_preds(); ++i) {
        Block* const pred = cfg->get_block_for_node(block->pred(i));
        worklist.push(pred);
      }
    }
  }
}