GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp
/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif

#define __ masm->

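// G1's SATB (snapshot-at-the-beginning) marking requires a pre-barrier for
// bulk oop-array updates: before an arraycopy overwrites the destination,
// the old values still stored there must be enqueued so concurrent marking
// does not lose them. The call is skipped entirely when marking is inactive
// or when the destination is known to be uninitialized (no previous values).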
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                            Register from, Register to, Register count,
                                                            Register preserve1, Register preserve2) {
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
  // With G1, don't generate the call if we statically know that the target is uninitialized.
  if (!dest_uninitialized) {
    int spill_slots = 3;
    if (preserve1 != noreg) { spill_slots++; }
    if (preserve2 != noreg) { spill_slots++; }
    const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
    Label filtered;

    // Is marking active?
    if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
      __ lwz(R0, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), R16_thread);
    } else {
      guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
      __ lbz(R0, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), R16_thread);
    }
    __ cmpdi(CCR0, R0, 0);
    __ beq(CCR0, filtered);

    __ save_LR_CR(R0);
    __ push_frame(frame_size, R0);
    int slot_nr = 0;
    __ std(from, frame_size - (++slot_nr) * wordSize, R1_SP);
    __ std(to, frame_size - (++slot_nr) * wordSize, R1_SP);
    __ std(count, frame_size - (++slot_nr) * wordSize, R1_SP);
    if (preserve1 != noreg) { __ std(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
    if (preserve2 != noreg) { __ std(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }

    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), to, count);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), to, count);
    }

    slot_nr = 0;
    __ ld(from, frame_size - (++slot_nr) * wordSize, R1_SP);
    __ ld(to, frame_size - (++slot_nr) * wordSize, R1_SP);
    __ ld(count, frame_size - (++slot_nr) * wordSize, R1_SP);
    if (preserve1 != noreg) { __ ld(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
    if (preserve2 != noreg) { __ ld(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
    __ addi(R1_SP, R1_SP, frame_size); // pop_frame()
    __ restore_LR_CR(R0);

    __ bind(filtered);
  }
}

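// Post-barrier for bulk oop-array updates: after the destination range has
// been written, the written range is handed to the runtime, which dirties
// the cards covering it so the updated references will be rescanned.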
void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                             Register addr, Register count, Register preserve) {
  int spill_slots = (preserve != noreg) ? 1 : 0;
  const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);

  __ save_LR_CR(R0);
  __ push_frame(frame_size, R0);
  if (preserve != noreg) { __ std(preserve, frame_size - 1 * wordSize, R1_SP); }
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry), addr, count);
  if (preserve != noreg) { __ ld(preserve, frame_size - 1 * wordSize, R1_SP); }
  __ addi(R1_SP, R1_SP, frame_size); // pop_frame();
  __ restore_LR_CR(R0);
}

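// SATB pre-barrier for a single oop field: if concurrent marking is active,
// the value about to be overwritten (the "previous value") is logged into the
// thread-local SATB buffer. The fast path stores into the buffer and decrements
// the index; the slow path calls into the runtime when the buffer is full
// (index == 0). With obj == noreg, the previous value must already be loaded
// into pre_val by the caller.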
void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, DecoratorSet decorators,
                                                 Register obj, RegisterOrConstant ind_or_offs, Register pre_val,
                                                 Register tmp1, Register tmp2,
                                                 MacroAssembler::PreservationLevel preservation_level) {
  bool not_null = (decorators & IS_NOT_NULL) != 0,
       preloaded = obj == noreg;
  Register nv_save = noreg;

  if (preloaded) {
    // We are not loading the previous value, so make
    // sure that we don't trash the value in pre_val
    // with the code below.
    assert_different_registers(pre_val, tmp1, tmp2);
    if (pre_val->is_volatile()) {
      nv_save = !tmp1->is_volatile() ? tmp1 : tmp2;
      assert(!nv_save->is_volatile(), "need one nv temp register if pre_val lives in volatile register");
    }
  }

  Label runtime, filtered;

  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ lwz(tmp1, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), R16_thread);
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ lbz(tmp1, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), R16_thread);
  }
  __ cmpdi(CCR0, tmp1, 0);
  __ beq(CCR0, filtered);

  // Do we need to load the previous value?
  if (!preloaded) {
    // Load the previous value...
    if (UseCompressedOops) {
      __ lwz(pre_val, ind_or_offs, obj);
    } else {
      __ ld(pre_val, ind_or_offs, obj);
    }
    // Previous value has been loaded into pre_val.
  }
  assert(pre_val != noreg, "must have a real register");

  // Is the previous value null?
  if (preloaded && not_null) {
#ifdef ASSERT
    __ cmpdi(CCR0, pre_val, 0);
    __ asm_assert_ne("null oop not allowed (G1 pre)"); // Checked by caller.
#endif
  } else {
    __ cmpdi(CCR0, pre_val, 0);
    __ beq(CCR0, filtered);
  }

  if (!preloaded && UseCompressedOops) {
    __ decode_heap_oop_not_null(pre_val);
  }

  // OK, it's not filtered, so we'll need to call enqueue. If pre_val lives
  // in a volatile register it has to be saved across the runtime call (via
  // nv_save, see above); otherwise we can call the enqueue code directly.

  // Can we store the original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)
  const Register Rbuffer = tmp1, Rindex = tmp2;

  __ ld(Rindex, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()), R16_thread);
  __ cmpdi(CCR0, Rindex, 0);
  __ beq(CCR0, runtime); // If index == 0, goto runtime.
  __ ld(Rbuffer, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()), R16_thread);

  __ addi(Rindex, Rindex, -wordSize); // Decrement index.
  __ std(Rindex, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()), R16_thread);

  // Record the previous value.
  __ stdx(pre_val, Rbuffer, Rindex);
  __ b(filtered);

  __ bind(runtime);

  // Determine necessary runtime invocation preservation measures.
  const bool needs_frame = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR;
  assert(preservation_level <= MacroAssembler::PRESERVATION_FRAME_LR,
         "g1_write_barrier_pre doesn't support preservation levels higher than PRESERVATION_FRAME_LR");

  // May need to preserve LR. Also needed if the current frame is not compatible with the C calling convention.
  if (needs_frame) {
    __ save_LR_CR(tmp1);
    __ push_frame_reg_args(0, tmp2);
  }

  if (pre_val->is_volatile() && preloaded) { __ mr(nv_save, pre_val); } // Save pre_val across C call if it was preloaded.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, R16_thread);
  if (pre_val->is_volatile() && preloaded) { __ mr(pre_val, nv_save); } // Restore.

  if (needs_frame) {
    __ pop_frame();
    __ restore_LR_CR(tmp1);
  }

  __ bind(filtered);
}

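// Post-barrier for a single oop store: a cross-region store of a non-null
// value dirties the card covering the store address and enqueues that card
// in the thread-local dirty card queue. Same-region stores, null stores,
// young-gen cards and already-dirty cards are filtered out on the fast path.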
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, DecoratorSet decorators,
                                                  Register store_addr, Register new_val,
                                                  Register tmp1, Register tmp2, Register tmp3,
                                                  MacroAssembler::PreservationLevel preservation_level) {
  bool not_null = (decorators & IS_NOT_NULL) != 0;

  Label runtime, filtered;
  assert_different_registers(store_addr, new_val, tmp1, tmp2);

  CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());

  // Does the store cross heap regions?
  __ xorr(tmp1, store_addr, new_val);
  __ srdi_(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
  __ beq(CCR0, filtered);

  // Crosses regions, storing NULL?
  if (not_null) {
#ifdef ASSERT
    __ cmpdi(CCR0, new_val, 0);
    __ asm_assert_ne("null oop not allowed (G1 post)"); // Checked by caller.
#endif
  } else {
    __ cmpdi(CCR0, new_val, 0);
    __ beq(CCR0, filtered);
  }

  // Storing region crossing non-NULL, is card already dirty?
  const Register Rcard_addr = tmp1;
  Register Rbase = tmp2;
  __ load_const_optimized(Rbase, (address)(ct->card_table()->byte_map_base()), /*temp*/ tmp3);

  __ srdi(Rcard_addr, store_addr, CardTable::card_shift);

  // Get the address of the card.
  __ lbzx(/*card value*/ tmp3, Rbase, Rcard_addr);
  __ cmpwi(CCR0, tmp3, (int)G1CardTable::g1_young_card_val());
  __ beq(CCR0, filtered);

  __ membar(Assembler::StoreLoad);
  __ lbzx(/*card value*/ tmp3, Rbase, Rcard_addr); // Reload after membar.
  __ cmpwi(CCR0, tmp3 /* card value */, (int)G1CardTable::dirty_card_val());
  __ beq(CCR0, filtered);

  // Storing a region crossing, non-NULL oop, card is clean.
  // Dirty card and log.
  __ li(tmp3, (int)G1CardTable::dirty_card_val());
  //release(); // G1: oops are allowed to get visible after dirty marking.
  __ stbx(tmp3, Rbase, Rcard_addr);

  __ add(Rcard_addr, Rbase, Rcard_addr); // This is the address which needs to get enqueued.
  Rbase = noreg; // End of lifetime.

  const Register Rqueue_index = tmp2,
                 Rqueue_buf   = tmp3;
  __ ld(Rqueue_index, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()), R16_thread);
  __ cmpdi(CCR0, Rqueue_index, 0);
  __ beq(CCR0, runtime); // If index == 0, goto runtime.
  __ ld(Rqueue_buf, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()), R16_thread);

  __ addi(Rqueue_index, Rqueue_index, -wordSize); // Decrement index.
  __ std(Rqueue_index, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()), R16_thread);

  __ stdx(Rcard_addr, Rqueue_buf, Rqueue_index); // Store card.
  __ b(filtered);

  __ bind(runtime);

  assert(preservation_level == MacroAssembler::PRESERVATION_NONE,
         "g1_write_barrier_post doesn't support preservation levels higher than PRESERVATION_NONE");

  // Save the live input values.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), Rcard_addr, R16_thread);

  __ bind(filtered);
}

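// Full G1 oop store: SATB pre-barrier, the actual store, then the card-table
// post-barrier. For array and unknown-reference stores ("precise" barriers),
// the card is computed from the exact element address rather than the object base.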
void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         Register base, RegisterOrConstant ind_or_offs, Register val,
                                         Register tmp1, Register tmp2, Register tmp3,
                                         MacroAssembler::PreservationLevel preservation_level) {
  bool is_array = (decorators & IS_ARRAY) != 0;
  bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool precise = is_array || on_anonymous;
  // Load and record the previous value.
  g1_write_barrier_pre(masm, decorators,
                       base, ind_or_offs,
                       tmp1, tmp2, tmp3,
                       preservation_level);

  BarrierSetAssembler::store_at(masm, decorators,
                                type, base, ind_or_offs, val,
                                tmp1, tmp2, tmp3,
                                preservation_level);

  // No need for post barrier if storing NULL
  if (val != noreg) {
    if (precise) {
      if (ind_or_offs.is_constant()) {
        __ add_const_optimized(base, base, ind_or_offs.as_constant(), tmp1);
      } else {
        __ add(base, ind_or_offs.as_register(), base);
      }
    }
    g1_write_barrier_post(masm, decorators,
                          base, val,
                          tmp1, tmp2, tmp3,
                          preservation_level);
  }
}

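// G1-aware oop load: for Reference.get()-style loads (weak/phantom referents),
// the loaded value must itself be logged via the SATB pre-barrier, so that a
// referent handed to the mutator is not missed by concurrent marking.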
void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    Register base, RegisterOrConstant ind_or_offs, Register dst,
                                    Register tmp1, Register tmp2,
                                    MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null) {
  bool on_oop = is_reference_type(type);
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;
  Label done;
  if (on_oop && on_reference && L_handle_null == NULL) { L_handle_null = &done; }
  // Load the value of the referent field.
  ModRefBarrierSetAssembler::load_at(masm, decorators, type,
                                     base, ind_or_offs, dst,
                                     tmp1, tmp2,
                                     preservation_level, L_handle_null);
  if (on_oop && on_reference) {
    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer. Note that with
    // these parameters the pre-barrier does not generate
    // the load of the previous value.
    // We only reach here if the value is not null.
    g1_write_barrier_pre(masm, decorators | IS_NOT_NULL,
                         noreg /* obj */, (intptr_t)0, dst /* pre_val */,
                         tmp1, tmp2,
                         preservation_level);
  }
  __ bind(done);
}

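// Resolve a JNI handle to an oop. Weak handles carry a tag bit; resolving a
// jweak must run the phantom-reference pre-barrier on the loaded value so the
// referent stays reachable for this use even if concurrent marking would
// otherwise clear it.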
void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value,
                                            Register tmp1, Register tmp2,
                                            MacroAssembler::PreservationLevel preservation_level) {
  Label done, not_weak;
  __ cmpdi(CCR0, value, 0);
  __ beq(CCR0, done); // Use NULL as-is.

  __ clrrdi(tmp1, value, JNIHandles::weak_tag_size);
  __ andi_(tmp2, value, JNIHandles::weak_tag_mask);
  __ ld(value, 0, tmp1); // Resolve (untagged) jobject.

  __ beq(CCR0, not_weak); // Test for jweak tag.
  __ verify_oop(value, FILE_AND_LINE);
  g1_write_barrier_pre(masm, IN_NATIVE | ON_PHANTOM_OOP_REF,
                       noreg, noreg, value,
                       tmp1, tmp2,
                       preservation_level);
  __ bind(not_weak);
  __ verify_oop(value, FILE_AND_LINE);
  __ bind(done);
}

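// The code below provides the G1 barrier support for the C1 (client) JIT:
// per-call-site out-of-line stubs that branch to shared runtime code blobs,
// plus the two runtime stubs those blobs are generated from.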
#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");
  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }

  __ cmpdi(CCR0, pre_val_reg, 0);
  __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());

  address c_code = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
  //__ load_const_optimized(R0, c_code);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(c_code));
  __ std(pre_val_reg, -8, R1_SP); // Pass pre_val on stack.
  __ mtctr(R0);
  __ bctrl();
  __ b(*stub->continuation());
}

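// Out-of-line C1 stub for the post-barrier slow path: skips null new values,
// passes the store address in R0 and branches to the shared post-barrier
// runtime code blob.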
void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  assert(stub->addr()->is_register(), "Precondition.");
  assert(stub->new_val()->is_register(), "Precondition.");
  Register addr_reg = stub->addr()->as_pointer_register();
  Register new_val_reg = stub->new_val()->as_register();

  __ cmpdi(CCR0, new_val_reg, 0);
  __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());

  address c_code = bs->post_barrier_c1_runtime_code_blob()->code_begin();
  //__ load_const_optimized(R0, c_code);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(c_code));
  __ mtctr(R0);
  __ mr(R0, addr_reg); // Pass addr in R0.
  __ bctrl();
  __ b(*stub->continuation());
}

#undef __
#define __ sasm->

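// Shared runtime stub behind the C1 pre-barrier: takes pre_val on the stack,
// enqueues it into the thread's SATB buffer, and refills the buffer via
// G1SATBMarkQueueSet::handle_zero_index_for_thread when it is full.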
void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  BarrierSet* bs = BarrierSet::barrier_set();

  __ set_info("g1_pre_barrier_slow_id", false);

  // Using stack slots: pre_val (pre-pushed), spill tmp, spill tmp2.
  const int stack_slots = 3;
  Register pre_val = R0; // previous value of memory
  Register tmp = R14;
  Register tmp2 = R15;

  Label refill, restart, marking_not_active;
  int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());

  // Spill
  __ std(tmp, -16, R1_SP);
  __ std(tmp2, -24, R1_SP);

  // Is marking still active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ lwz(tmp, satb_q_active_byte_offset, R16_thread);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ lbz(tmp, satb_q_active_byte_offset, R16_thread);
  }
  __ cmpdi(CCR0, tmp, 0);
  __ beq(CCR0, marking_not_active);

  __ bind(restart);
  // Load the index into the SATB buffer. SATBMarkQueue::_index is a
  // size_t so ld_ptr is appropriate.
  __ ld(tmp, satb_q_index_byte_offset, R16_thread);

  // index == 0?
  __ cmpdi(CCR0, tmp, 0);
  __ beq(CCR0, refill);

  __ ld(tmp2, satb_q_buf_byte_offset, R16_thread);
  __ ld(pre_val, -8, R1_SP); // Load from stack.
  __ addi(tmp, tmp, -oopSize);

  __ std(tmp, satb_q_index_byte_offset, R16_thread);
  __ stdx(pre_val, tmp2, tmp); // [_buf + index] := pre_val

  __ bind(marking_not_active);
  // Restore temp registers and return-from-leaf.
  __ ld(tmp2, -24, R1_SP);
  __ ld(tmp, -16, R1_SP);
  __ blr();

  __ bind(refill);
  const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
  __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
  __ mflr(R0);
  __ std(R0, _abi0(lr), R1_SP);
  __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1SATBMarkQueueSet::handle_zero_index_for_thread), R16_thread);
  __ pop_frame();
  __ ld(R0, _abi0(lr), R1_SP);
  __ mtlr(R0);
  __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
  __ b(restart);
}

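// Shared runtime stub behind the C1 post-barrier: converts the store address
// passed in R0 to a card address, dirties the card unless it is young or
// already dirty, and enqueues it, refilling the dirty card queue via
// G1DirtyCardQueueSet::handle_zero_index_for_thread when it is full.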
void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
  G1BarrierSet* bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());

  __ set_info("g1_post_barrier_slow_id", false);

  // Using stack slots: spill addr, spill tmp2
  const int stack_slots = 2;
  Register tmp = R0;
  Register addr = R14;
  Register tmp2 = R15;
  CardTable::CardValue* byte_map_base = bs->card_table()->byte_map_base();

  Label restart, refill, ret;

  // Spill
  __ std(addr, -8, R1_SP);
  __ std(tmp2, -16, R1_SP);

  __ srdi(addr, R0, CardTable::card_shift); // Addr is passed in R0.
  __ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
  __ add(addr, tmp2, addr);
  __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]

  // Return if young card.
  __ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
  __ beq(CCR0, ret);

  // Return if the card, reloaded after a StoreLoad barrier, is already dirty.
  __ membar(Assembler::StoreLoad);
  __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]

  __ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
  __ beq(CCR0, ret);

  // Not dirty.

  // First, dirty it.
  __ li(tmp, G1CardTable::dirty_card_val());
  __ stb(tmp, 0, addr);

  int dirty_card_q_index_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
  int dirty_card_q_buf_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());

  __ bind(restart);

  // Get the index into the update buffer. G1DirtyCardQueue::_index is
  // a size_t so ld_ptr is appropriate here.
  __ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);

  // index == 0?
  __ cmpdi(CCR0, tmp2, 0);
  __ beq(CCR0, refill);

  __ ld(tmp, dirty_card_q_buf_byte_offset, R16_thread);
  __ addi(tmp2, tmp2, -oopSize);

  __ std(tmp2, dirty_card_q_index_byte_offset, R16_thread);
  __ add(tmp2, tmp, tmp2);
  __ std(addr, 0, tmp2); // [_buf + index] := <address_of_card>

  // Restore temp registers and return-from-leaf.
  __ bind(ret);
  __ ld(tmp2, -16, R1_SP);
  __ ld(addr, -8, R1_SP);
  __ blr();

  __ bind(refill);
  const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
  __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
  __ mflr(R0);
  __ std(R0, _abi0(lr), R1_SP);
  __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1DirtyCardQueueSet::handle_zero_index_for_thread), R16_thread);
  __ pop_frame();
  __ ld(R0, _abi0(lr), R1_SP);
  __ mtlr(R0);
  __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
  __ b(restart);
}

#undef __

#endif // COMPILER1