PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp
/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif

#define __ masm->
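// ('__' is the usual HotSpot shorthand: each '__ op(...)' below emits code
//  through whichever assembler the macro is bound to at that point.)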

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
53
54
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
55
Register addr, Register count, int callee_saved_regs) {
56
bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
57
if (!dest_uninitialized) {
58
assert( addr->encoding() < callee_saved_regs, "addr must be saved");
59
assert(count->encoding() < callee_saved_regs, "count must be saved");
60
61
BLOCK_COMMENT("PreBarrier");
62
63
RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1));
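    // (R9ifScratched adds R9 to the saved set on ABIs where R9 is
    //  call-clobbered, and is empty otherwise.)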
    __ push(saved_regs | R9ifScratched);

    if (addr != R0) {
      assert_different_registers(count, R0);
      __ mov(R0, addr);
    }
    if (count != R1) {
      __ mov(R1, count);
    }

    if (UseCompressedOops) {
      __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry));
    } else {
      __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry));
    }

    __ pop(saved_regs | R9ifScratched);
  }
}

void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                             Register addr, Register count, Register tmp) {

  BLOCK_COMMENT("G1PostBarrier");
  if (addr != R0) {
    assert_different_registers(count, R0);
    __ mov(R0, addr);
  }
  if (count != R1) {
    __ mov(R1, count);
  }
#if R9_IS_SCRATCHED
  // Safer to save R9 here since callers may have been written
  // assuming R9 survives. This is suboptimal but is not in
  // general worth optimizing for the few platforms where R9
  // is scratched. Note that the optimization might not be too
  // difficult for this particular call site.
  __ push(R9);
#endif // R9_IS_SCRATCHED
  __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry));
#if R9_IS_SCRATCHED
  __ pop(R9);
#endif // R9_IS_SCRATCHED
}

// G1 pre-barrier.
// Blows all volatile registers (R0-R3, Rtemp, LR).
// If store_addr != noreg, then previous value is loaded from [store_addr];
// in such case store_addr and new_val registers are preserved;
// otherwise pre_val register is preserved.
void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
                                                 Register store_addr,
                                                 Register new_val,
                                                 Register pre_val,
                                                 Register tmp1,
                                                 Register tmp2) {
  Label done;
  Label runtime;

  if (store_addr != noreg) {
    assert_different_registers(store_addr, new_val, pre_val, tmp1, tmp2, noreg);
  } else {
    assert(new_val == noreg, "should be");
    assert_different_registers(pre_val, tmp1, tmp2, noreg);
  }

  Address in_progress(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  Address index(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "adjust this code");
  __ ldrb(tmp1, in_progress);
  __ cbz(tmp1, done);

  // Do we need to load the previous value?
  if (store_addr != noreg) {
    __ load_heap_oop(pre_val, Address(store_addr, 0));
  }

  // Is the previous value null?
  __ cbz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)
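  // (The index counts down in bytes from the buffer capacity; once the
  //  decrement below would take it negative, the buffer is full and must
  //  be flushed by the runtime.)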

  __ ldr(tmp1, index);           // tmp1 := *index_adr
  __ ldr(tmp2, buffer);

  __ subs(tmp1, tmp1, wordSize); // tmp1 := tmp1 - wordSize
  __ b(runtime, lt);             // If negative, goto runtime

  __ str(tmp1, index);           // *index_adr := tmp1

  // Record the previous value
  __ str(pre_val, Address(tmp2, tmp1));
  __ b(done);

  __ bind(runtime);

  // save the live input values
  if (store_addr != noreg) {
    // avoid raw_push to support any ordering of store_addr and new_val
    __ push(RegisterSet(store_addr) | RegisterSet(new_val));
  } else {
    __ push(pre_val);
  }

  if (pre_val != R0) {
    __ mov(R0, pre_val);
  }
  __ mov(R1, Rthread);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), R0, R1);

  if (store_addr != noreg) {
    __ pop(RegisterSet(store_addr) | RegisterSet(new_val));
  } else {
    __ pop(pre_val);
  }

  __ bind(done);
}

// G1 post-barrier.
// Blows all volatile registers (R0-R3, Rtemp, LR).
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
                                                  Register store_addr,
                                                  Register new_val,
                                                  Register tmp1,
                                                  Register tmp2,
                                                  Register tmp3) {

  Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
  Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

  BarrierSet* bs = BarrierSet::barrier_set();
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();
  Label done;
  Label runtime;

  // Does store cross heap regions?
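  // (Addresses in the same region agree in all bits above
  //  LogOfHRGrainBytes, so a non-zero shifted XOR below means the store
  //  crosses regions and may need a remembered-set update.)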
  __ eor(tmp1, store_addr, new_val);
  __ movs(tmp1, AsmOperand(tmp1, lsr, HeapRegion::LogOfHRGrainBytes));
  __ b(done, eq);

  // crosses regions, storing NULL?

  __ cbz(new_val, done);

  // storing region crossing non-NULL, is card already dirty?
  const Register card_addr = tmp1;

  __ mov_address(tmp2, (address)ct->byte_map_base());
  __ add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTable::card_shift));

  __ ldrb(tmp2, Address(card_addr));
  __ cmp(tmp2, (int)G1CardTable::g1_young_card_val());
  __ b(done, eq);

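  // Reload the card after a StoreLoad barrier that ensures the stored oop
  // is visible to concurrent refinement before the card is re-examined.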
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp2);

  assert(CardTable::dirty_card_val() == 0, "adjust this code");
  __ ldrb(tmp2, Address(card_addr));
  __ cbz(tmp2, done);

  // storing a region crossing, non-NULL oop, card is clean.
  // dirty card and log.

  __ strb(__ zero_register(tmp2), Address(card_addr));

  __ ldr(tmp2, queue_index);
  __ ldr(tmp3, buffer);

  __ subs(tmp2, tmp2, wordSize);
  __ b(runtime, lt); // go to runtime if now negative

  __ str(tmp2, queue_index);

  __ str(card_addr, Address(tmp3, tmp2));
  __ b(done);

  __ bind(runtime);

  if (card_addr != R0) {
    __ mov(R0, card_addr);
  }
  __ mov(R1, Rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), R0, R1);

  __ bind(done);
}

void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    Register dst, Address src, Register tmp1, Register tmp2, Register tmp3) {
  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;

  ModRefBarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2, tmp3);
  if (on_oop && on_reference) {
    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
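    // (Passing store_addr == noreg means 'dst' is treated as the already
    //  loaded pre-value, keeping weak/phantom referents alive for SATB
    //  marking.)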
    g1_write_barrier_pre(masm, noreg, noreg, dst, tmp1, tmp2);
  }
}

void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;
  assert((decorators & IS_DEST_UNINITIALIZED) == 0, "unsupported");

  bool needs_pre_barrier = as_normal;
  bool needs_post_barrier = (new_val != noreg) && in_heap;

  // flatten object address if needed
  assert(obj.mode() == basic_offset, "pre- or post-indexing is not supported here");

  const Register store_addr = obj.base();
  if (obj.index() != noreg) {
    assert(obj.disp() == 0, "index or displacement, not both");
    assert(obj.offset_op() == add_offset, "addition is expected");
    __ add(store_addr, obj.base(), AsmOperand(obj.index(), obj.shift(), obj.shift_imm()));
  } else if (obj.disp() != 0) {
    __ add(store_addr, obj.base(), obj.disp());
  }

  if (needs_pre_barrier) {
    g1_write_barrier_pre(masm, store_addr, new_val, tmp1, tmp2, tmp3);
  }

  if (is_null) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(store_addr), new_val, tmp1, tmp2, tmp3, true);
  } else {
    // G1 barrier needs uncompressed oop for region cross check.
    Register val_to_store = new_val;
    if (UseCompressedOops) {
      val_to_store = tmp1;
      __ mov(val_to_store, new_val);
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(store_addr), val_to_store, tmp1, tmp2, tmp3, false);
    if (needs_post_barrier) {
      g1_write_barrier_post(masm, store_addr, new_val, tmp1, tmp2, tmp3);
    }
  }
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }

  __ cbz(pre_val_reg, *stub->continuation());
  ce->verify_reserved_argument_area_size(1);
  __ str(pre_val_reg, Address(SP));
  __ call(bs->pre_barrier_c1_runtime_code_blob()->code_begin(), relocInfo::runtime_call_type);

  __ b(*stub->continuation());
}

void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());
  assert(stub->addr()->is_register(), "Precondition.");
  assert(stub->new_val()->is_register(), "Precondition.");
  Register new_val_reg = stub->new_val()->as_register();
  __ cbz(new_val_reg, *stub->continuation());
  ce->verify_reserved_argument_area_size(1);
  __ str(stub->addr()->as_pointer_register(), Address(SP));
  __ call(bs->post_barrier_c1_runtime_code_blob()->code_begin(), relocInfo::runtime_call_type);
  __ b(*stub->continuation());
}

#undef __
#define __ sasm->

void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  // Input:
  // - pre_val pushed on the stack

  __ set_info("g1_pre_barrier_slow_id", false);

  // save at least the registers that need saving if the runtime is called
  const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
  const int nb_saved_regs = 6;
  assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
  __ push(saved_regs);

  const Register r_pre_val_0 = R0; // must be R0, to be ready for the runtime call
  const Register r_index_1 = R1;
  const Register r_buffer_2 = R2;

  Address queue_active(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  Address queue_index(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
  __ ldrb(R1, queue_active);
  __ cbz(R1, done);

  __ ldr(r_index_1, queue_index);
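  // The stub caller stored pre_val in the reserved argument slot at SP
  // before the call; after the push above it sits nb_saved_regs words up.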
  __ ldr(r_pre_val_0, Address(SP, nb_saved_regs*wordSize));
  __ ldr(r_buffer_2, buffer);

  __ subs(r_index_1, r_index_1, wordSize);
  __ b(runtime, lt);

  __ str(r_index_1, queue_index);
  __ str(r_pre_val_0, Address(r_buffer_2, r_index_1));

  __ bind(done);

  __ pop(saved_regs);

  __ ret();

  __ bind(runtime);

  __ save_live_registers();

  assert(r_pre_val_0 == c_rarg0, "pre_val should be in R0");
  __ mov(c_rarg1, Rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), c_rarg0, c_rarg1);

  __ restore_live_registers_without_return();

  __ b(done);
}

void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
  // Input:
  // - store_addr, pushed on the stack

  __ set_info("g1_post_barrier_slow_id", false);

  Label done;
  Label recheck;
  Label runtime;

  Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
  Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

  AddressLiteral cardtable(ci_card_table_address_as<address>(), relocInfo::none);

  // save at least the registers that need saving if the runtime is called
  const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
  const int nb_saved_regs = 6;
  assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
  __ push(saved_regs);

  const Register r_card_addr_0 = R0; // must be R0 for the slow case
  const Register r_obj_0 = R0;
  const Register r_card_base_1 = R1;
  const Register r_tmp2 = R2;
  const Register r_index_2 = R2;
  const Register r_buffer_3 = R3;
  const Register tmp1 = Rtemp;
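  // (R0 and R2 are deliberately double-named above: r_obj_0 is consumed
  //  while computing r_card_addr_0, and r_tmp2's card value is dead before
  //  r_index_2 is loaded, so the live ranges never overlap.)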

  __ ldr(r_obj_0, Address(SP, nb_saved_regs*wordSize));
  // Note: there is a comment in x86 code about not using
  // ExternalAddress / lea, due to relocation not working
  // properly for that address. Should be OK for arm, where we
  // explicitly specify that 'cardtable' has a relocInfo::none
  // type.
  __ lea(r_card_base_1, cardtable);
  __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTable::card_shift));

  // first quick check without barrier
  __ ldrb(r_tmp2, Address(r_card_addr_0));

  __ cmp(r_tmp2, (int)G1CardTable::g1_young_card_val());
  __ b(recheck, ne);

  __ bind(done);

  __ pop(saved_regs);

  __ ret();

  __ bind(recheck);

  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp1);

  // reload card state after the barrier that ensures the stored oop was visible
  __ ldrb(r_tmp2, Address(r_card_addr_0));

  assert(CardTable::dirty_card_val() == 0, "adjust this code");
  __ cbz(r_tmp2, done);

  // storing region crossing non-NULL, card is clean.
  // dirty card and log.

  assert(0 == (int)CardTable::dirty_card_val(), "adjust this code");
  if ((ci_card_table_address_as<intptr_t>() & 0xff) == 0) {
    // Card table is aligned so the lowest byte of the table address base is zero.
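    // (strb writes only the low byte of r_card_base_1, which is zero here
    //  and thus equal to dirty_card_val(), so no separate zero register is
    //  needed.)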
    __ strb(r_card_base_1, Address(r_card_addr_0));
  } else {
    __ strb(__ zero_register(r_tmp2), Address(r_card_addr_0));
  }

  __ ldr(r_index_2, queue_index);
  __ ldr(r_buffer_3, buffer);

  __ subs(r_index_2, r_index_2, wordSize);
  __ b(runtime, lt); // go to runtime if now negative

  __ str(r_index_2, queue_index);

  __ str(r_card_addr_0, Address(r_buffer_3, r_index_2));

  __ b(done);

  __ bind(runtime);

  __ save_live_registers();

  assert(r_card_addr_0 == c_rarg0, "card_addr should be in R0");
  __ mov(c_rarg1, Rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), c_rarg0, c_rarg1);

  __ restore_live_registers_without_return();

  __ b(done);
}

#undef __

#endif // COMPILER1