GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp
/*
 * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, 2022 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/register.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "gc/z/zThreadLocalData.hpp"
#include "memory/resourceArea.hpp"
#include "register_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#endif // COMPILER1
#ifdef COMPILER2
#include "gc/z/c2/zBarrierSetC2.hpp"
#endif // COMPILER2

#undef __
#define __ masm->

void ZBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Register base, RegisterOrConstant ind_or_offs, Register dst,
                                   Register tmp1, Register tmp2,
                                   MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null) {
  __ block_comment("load_at (zgc) {");

  // Check whether a special gc barrier is required for this particular load
  // (e.g. whether it's a reference load or not)
  if (!ZBarrierSet::barrier_needed(decorators, type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, base, ind_or_offs, dst,
                                 tmp1, tmp2, preservation_level, L_handle_null);
    return;
  }

  if (ind_or_offs.is_register()) {
    assert_different_registers(base, ind_or_offs.as_register(), tmp1, tmp2, R0, noreg);
    assert_different_registers(dst, ind_or_offs.as_register(), tmp1, tmp2, R0, noreg);
  } else {
    assert_different_registers(base, tmp1, tmp2, R0, noreg);
    assert_different_registers(dst, tmp1, tmp2, R0, noreg);
  }

  /* ==== Load the pointer using the standard implementation for the actual heap access
          and the decompression of compressed pointers ==== */
  // Result of 'load_at' (standard implementation) will be written back to 'dst'.
  // As 'base' is required for the C-call, it must be preserved in case of a register clash.
  Register saved_base = base;
  if (base == dst) {
    __ mr(tmp2, base);
    saved_base = tmp2;
  }

  BarrierSetAssembler::load_at(masm, decorators, type, base, ind_or_offs, dst,
                               tmp1, noreg, preservation_level, L_handle_null);

  /* ==== Check whether pointer is dirty ==== */
  Label skip_barrier;

  // Load bad mask into scratch register.
  __ ld(tmp1, (intptr_t) ZThreadLocalData::address_bad_mask_offset(), R16_thread);

  // The color bits of the to-be-tested pointer need not match the 'bad_mask' bits exactly.
  // A pointer is classified as dirty if any of its color bits that is also set in the bad mask is set.
  // Conversely, the bitwise AND of the bad mask and the pointer is zero if and only if the pointer is clean.
  // Only dirty pointers must be processed by this barrier, so the barrier can be skipped whenever that AND is zero.
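  // Illustration (a sketch only; the actual color-bit positions depend on the platform's
  // address layout, and which mask bit is "bad" changes with the GC phase):
  //   bad_mask: 0b0100 << shift,  pointer color: 0b0100 << shift  ->  AND != 0  ->  dirty, run barrier
  //   bad_mask: 0b0100 << shift,  pointer color: 0b0010 << shift  ->  AND == 0  ->  clean, skip barrier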
  __ and_(tmp1, tmp1, dst);
  __ beq(CCR0, skip_barrier);

  /* ==== Invoke barrier ==== */
  int nbytes_save = 0;

  const bool needs_frame = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR;
  const bool preserve_gp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS;
  const bool preserve_fp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS;

  const bool preserve_R3 = dst != R3_ARG1;
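  // If 'dst' is R3_ARG1, R3 will receive the barrier's result anyway,
  // so saving and restoring it would only waste a save slot.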

  if (needs_frame) {
    if (preserve_gp_registers) {
      nbytes_save = (preserve_fp_registers
                     ? MacroAssembler::num_volatile_gp_regs + MacroAssembler::num_volatile_fp_regs
                     : MacroAssembler::num_volatile_gp_regs) * BytesPerWord;
      nbytes_save -= preserve_R3 ? 0 : BytesPerWord;
      __ save_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3);
    }

    __ save_LR_CR(tmp1);
    __ push_frame_reg_args(nbytes_save, tmp1);
  }

  // Setup arguments
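  // The runtime call expects the preloaded reference in R3_ARG1 and the field's
  // address in R4_ARG2. The three cases below differ only in move ordering,
  // chosen so that neither argument register is clobbered before it is read.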
  if (saved_base != R3_ARG1) {
    __ mr_if_needed(R3_ARG1, dst);
    __ add(R4_ARG2, ind_or_offs, saved_base);
  } else if (dst != R4_ARG2) {
    __ add(R4_ARG2, ind_or_offs, saved_base);
    __ mr(R3_ARG1, dst);
  } else {
    __ add(R0, ind_or_offs, saved_base);
    __ mr(R3_ARG1, dst);
    __ mr(R4_ARG2, R0);
  }

  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators));

  Register result = R3_RET;
  if (needs_frame) {
    __ pop_frame();
    __ restore_LR_CR(tmp1);

    if (preserve_R3) {
      __ mr(R0, R3_RET);
      result = R0;
    }

    if (preserve_gp_registers) {
      __ restore_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers, preserve_R3);
    }
  }
  __ mr_if_needed(dst, result);

  __ bind(skip_barrier);
  __ block_comment("} load_at (zgc)");
}

#ifdef ASSERT
// The Z store barrier only verifies the pointers it is operating on and is thus purely a debugging measure.
void ZBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    Register base, RegisterOrConstant ind_or_offs, Register val,
                                    Register tmp1, Register tmp2, Register tmp3,
                                    MacroAssembler::PreservationLevel preservation_level) {
  __ block_comment("store_at (zgc) {");

  // If the 'val' register is 'noreg', the to-be-stored value is a null pointer.
  if (is_reference_type(type) && val != noreg) {
    __ ld(tmp1, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread);
    __ and_(tmp1, tmp1, val);
    __ asm_assert_eq("Detected dirty pointer on the heap in Z store barrier");
  }

  // Store value
  BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, preservation_level);

  __ block_comment("} store_at (zgc)");
}
#endif // ASSERT

void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, DecoratorSet decorators, BasicType component_type,
                                              Register src, Register dst, Register count,
                                              Register preserve1, Register preserve2) {
  __ block_comment("arraycopy_prologue (zgc) {");

  /* ==== Check whether a special gc barrier is required for this particular load ==== */
  if (!is_reference_type(component_type)) {
    return;
  }

  Label skip_barrier;

  // Fast path: Array is of length zero
  __ cmpdi(CCR0, count, 0);
  __ beq(CCR0, skip_barrier);

  /* ==== Ensure register sanity ==== */
  Register tmp_R11 = R11_scratch1;

  assert_different_registers(src, dst, count, tmp_R11, noreg);
  if (preserve1 != noreg) {
    // Not technically required, but sharing the two preserve registers is unlikely to be intended.
    assert_different_registers(preserve1, preserve2);
  }

  /* ==== Invoke barrier (slowpath) ==== */
  int nbytes_save = 0;
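  // The registers are spilled into the area just below the current SP; the
  // subsequent push_frame_reg_args covers that area, so the spilled values
  // survive the runtime call.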

  {
    assert(!noreg->is_volatile(), "sanity");

    if (preserve1->is_volatile()) {
      __ std(preserve1, -BytesPerWord * ++nbytes_save, R1_SP);
    }

    if (preserve2->is_volatile() && preserve1 != preserve2) {
      __ std(preserve2, -BytesPerWord * ++nbytes_save, R1_SP);
    }

    __ std(src, -BytesPerWord * ++nbytes_save, R1_SP);
    __ std(dst, -BytesPerWord * ++nbytes_save, R1_SP);
    __ std(count, -BytesPerWord * ++nbytes_save, R1_SP);

    __ save_LR_CR(tmp_R11);
    __ push_frame_reg_args(nbytes_save, tmp_R11);
  }

  // ZBarrierSetRuntime::load_barrier_on_oop_array_addr(src, count)
  if (count == R3_ARG1) {
    if (src == R4_ARG2) {
      // Arguments are provided in reverse order
      __ mr(tmp_R11, count);
      __ mr(R3_ARG1, src);
      __ mr(R4_ARG2, tmp_R11);
    } else {
      __ mr(R4_ARG2, count);
      __ mr(R3_ARG1, src);
    }
  } else {
    __ mr_if_needed(R3_ARG1, src);
    __ mr_if_needed(R4_ARG2, count);
  }

  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_array_addr());

  __ pop_frame();
  __ restore_LR_CR(tmp_R11);

  {
    __ ld(count, -BytesPerWord * nbytes_save--, R1_SP);
    __ ld(dst, -BytesPerWord * nbytes_save--, R1_SP);
    __ ld(src, -BytesPerWord * nbytes_save--, R1_SP);

    if (preserve2->is_volatile() && preserve1 != preserve2) {
      __ ld(preserve2, -BytesPerWord * nbytes_save--, R1_SP);
    }

    if (preserve1->is_volatile()) {
      __ ld(preserve1, -BytesPerWord * nbytes_save--, R1_SP);
    }
  }

  __ bind(skip_barrier);

  __ block_comment("} arraycopy_prologue (zgc)");
}

void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
                                                         Register obj, Register tmp, Label& slowpath) {
  __ block_comment("try_resolve_jobject_in_native (zgc) {");

  assert_different_registers(jni_env, obj, tmp);

  // Resolve the pointer using the standard implementation for weak tag handling and pointer verification.
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, dst, jni_env, obj, tmp, slowpath);

  // Check whether pointer is dirty.
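  // 'jni_env' points at the JavaThread's JNIEnv field, so the bad-mask offset
  // is rebased from the start of the JavaThread to that field.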
  __ ld(tmp,
        in_bytes(ZThreadLocalData::address_bad_mask_offset() - JavaThread::jni_environment_offset()),
        jni_env);

  __ and_(tmp, obj, tmp);
  __ bne(CCR0, slowpath);

  __ block_comment("} try_resolve_jobject_in_native (zgc)");
}

#undef __

#ifdef COMPILER1
#define __ ce->masm()->

// Code emitted by LIR node "LIR_OpZLoadBarrierTest" which in turn is emitted by ZBarrierSetC1::load_barrier.
// The actual compare and branch instructions are represented as stand-alone LIR nodes.
void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
                                                         LIR_Opr ref) const {
  __ block_comment("load_barrier_test (zgc) {");

  __ ld(R0, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread);
  __ andr(R0, R0, ref->as_pointer_register());
  __ cmpdi(CCR5 /* as mandated by LIR node */, R0, 0);

  __ block_comment("} load_barrier_test (zgc)");
}

// Code emitted by code stub "ZLoadBarrierStubC1" which in turn is emitted by ZBarrierSetC1::load_barrier.
// Invokes the runtime stub which is defined just below.
void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
                                                         ZLoadBarrierStubC1* stub) const {
  __ block_comment("c1_load_barrier_stub (zgc) {");

  __ bind(*stub->entry());

  /* ==== Determine relevant data registers and ensure register sanity ==== */
  Register ref = stub->ref()->as_register();
  Register ref_addr = noreg;

  // Determine reference address
  if (stub->tmp()->is_valid()) {
    // 'tmp' register is given, so address might have an index or a displacement.
    ce->leal(stub->ref_addr(), stub->tmp());
    ref_addr = stub->tmp()->as_pointer_register();
  } else {
    // 'tmp' register is not given, so address must have neither an index nor a displacement.
    // The address' base register is thus usable as-is.
    assert(stub->ref_addr()->as_address_ptr()->disp() == 0, "illegal displacement");
    assert(!stub->ref_addr()->as_address_ptr()->index()->is_valid(), "illegal index");

    ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
  }

  assert_different_registers(ref, ref_addr, R0, noreg);

  /* ==== Invoke stub ==== */
  // Pass arguments via stack. The stack pointer will be bumped by the stub.
  __ std(ref, (intptr_t) -1 * BytesPerWord, R1_SP);
  __ std(ref_addr, (intptr_t) -2 * BytesPerWord, R1_SP);

  __ load_const_optimized(R0, stub->runtime_stub());
  __ call_stub(R0);

  // The runtime stub passes the result via the R0 register, overriding the previously-loaded stub address.
  __ mr_if_needed(ref, R0);
  __ b(*stub->continuation());

  __ block_comment("} c1_load_barrier_stub (zgc)");
}

#undef __
#define __ sasm->

// Code emitted by runtime code stub which in turn is emitted by ZBarrierSetC1::generate_c1_runtime_stubs.
void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
                                                                 DecoratorSet decorators) const {
  __ block_comment("c1_load_barrier_runtime_stub (zgc) {");

  const int stack_parameters = 2;
  const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_parameters) * BytesPerWord;

  __ save_volatile_gprs(R1_SP, -nbytes_save);
  __ save_LR_CR(R0);

  // Load arguments back again from the stack.
  __ ld(R3_ARG1, (intptr_t) -1 * BytesPerWord, R1_SP); // ref
  __ ld(R4_ARG2, (intptr_t) -2 * BytesPerWord, R1_SP); // ref_addr

  __ push_frame_reg_args(nbytes_save, R0);

  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators));

  __ verify_oop(R3_RET, "Bad pointer after barrier invocation");
  __ mr(R0, R3_RET);

  __ pop_frame();
  __ restore_LR_CR(R3_RET);
  __ restore_volatile_gprs(R1_SP, -nbytes_save);

  __ blr();

  __ block_comment("} c1_load_barrier_runtime_stub (zgc)");
}

#undef __
#endif // COMPILER1

#ifdef COMPILER2

OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) const {
  if (!OptoReg::is_reg(opto_reg)) {
    return OptoReg::Bad;
  }

  VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
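  // 64-bit GP and FP registers span two adjacent OptoRegs; only the even
  // (first) half names the whole register, so odd halves are rejected.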
  if ((vm_reg->is_Register() || vm_reg->is_FloatRegister()) && (opto_reg & 1) != 0) {
    return OptoReg::Bad;
  }

  return opto_reg;
}

#define __ _masm->

class ZSaveLiveRegisters {
  MacroAssembler* _masm;
  RegMask _reg_mask;
  Register _result_reg;
  int _frame_size;

 public:
  ZSaveLiveRegisters(MacroAssembler *masm, ZLoadBarrierStubC2 *stub)
    : _masm(masm), _reg_mask(stub->live()), _result_reg(stub->ref()) {

    const int register_save_size = iterate_over_register_mask(ACTION_COUNT_ONLY) * BytesPerWord;
    _frame_size = align_up(register_save_size, frame::alignment_in_bytes)
                  + frame::abi_reg_args_size;

    __ save_LR_CR(R0);
    __ push_frame(_frame_size, R0);

    iterate_over_register_mask(ACTION_SAVE, _frame_size);
  }

  ~ZSaveLiveRegisters() {
    iterate_over_register_mask(ACTION_RESTORE, _frame_size);

    __ addi(R1_SP, R1_SP, _frame_size);
    __ restore_LR_CR(R0);
  }

 private:
  enum IterationAction : int {
    ACTION_SAVE,
    ACTION_RESTORE,
    ACTION_COUNT_ONLY
  };

  int iterate_over_register_mask(IterationAction action, int offset = 0) {
    int reg_save_index = 0;
    RegMaskIterator live_regs_iterator(_reg_mask);

    while (live_regs_iterator.has_next()) {
      const OptoReg::Name opto_reg = live_regs_iterator.next();

      // Filter out stack slots (spilled registers, i.e., stack-allocated registers).
      if (!OptoReg::is_reg(opto_reg)) {
        continue;
      }

      const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
      if (vm_reg->is_Register()) {
        Register std_reg = vm_reg->as_Register();

        // '_result_reg' will hold the end result of the operation. Its content must thus not be preserved.
        if (std_reg == _result_reg) {
          continue;
        }

        if (std_reg->encoding() >= R2->encoding() && std_reg->encoding() <= R12->encoding()) {
          reg_save_index++;

          if (action == ACTION_SAVE) {
            _masm->std(std_reg, offset - reg_save_index * BytesPerWord, R1_SP);
          } else if (action == ACTION_RESTORE) {
            _masm->ld(std_reg, offset - reg_save_index * BytesPerWord, R1_SP);
          } else {
            assert(action == ACTION_COUNT_ONLY, "Sanity");
          }
        }
      } else if (vm_reg->is_FloatRegister()) {
        FloatRegister fp_reg = vm_reg->as_FloatRegister();
        if (fp_reg->encoding() >= F0->encoding() && fp_reg->encoding() <= F13->encoding()) {
          reg_save_index++;

          if (action == ACTION_SAVE) {
            _masm->stfd(fp_reg, offset - reg_save_index * BytesPerWord, R1_SP);
          } else if (action == ACTION_RESTORE) {
            _masm->lfd(fp_reg, offset - reg_save_index * BytesPerWord, R1_SP);
          } else {
            assert(action == ACTION_COUNT_ONLY, "Sanity");
          }
        }
      } else if (vm_reg->is_ConditionRegister()) {
        // NOP. Condition registers are covered by save_LR_CR.
      } else if (vm_reg->is_VectorSRegister()) {
        assert(SuperwordUseVSX, "or should not reach here");
        VectorSRegister vs_reg = vm_reg->as_VectorSRegister();
        if (vs_reg->encoding() >= VSR32->encoding() && vs_reg->encoding() <= VSR51->encoding()) {
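          // A VSX register is 128 bits wide and thus occupies two adjacent
          // 8-byte save slots, hence the save index advances by two.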
          reg_save_index += 2;

          Register spill_addr = R0;
          if (action == ACTION_SAVE) {
            _masm->addi(spill_addr, R1_SP, offset - reg_save_index * BytesPerWord);
            _masm->stxvd2x(vs_reg, spill_addr);
          } else if (action == ACTION_RESTORE) {
            _masm->addi(spill_addr, R1_SP, offset - reg_save_index * BytesPerWord);
            _masm->lxvd2x(vs_reg, spill_addr);
          } else {
            assert(action == ACTION_COUNT_ONLY, "Sanity");
          }
        }
      } else {
        if (vm_reg->is_SpecialRegister()) {
          fatal("Special registers are unsupported. Found register %s", vm_reg->name());
        } else {
          fatal("Register type is not known");
        }
      }
    }

    return reg_save_index;
  }
};

#undef __
#define __ _masm->

class ZSetupArguments {
  MacroAssembler* const _masm;
  const Register _ref;
  const Address _ref_addr;

 public:
  ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
      _masm(masm),
      _ref(stub->ref()),
      _ref_addr(stub->ref_addr()) {

    // Desired register/argument configuration:
    // _ref: R3_ARG1
    // _ref_addr: R4_ARG2

    // '_ref_addr' can be unspecified. In that case, the barrier will not heal the reference.
    if (_ref_addr.base() == noreg) {
      assert_different_registers(_ref, R0, noreg);

      __ mr_if_needed(R3_ARG1, _ref);
      __ li(R4_ARG2, 0);
    } else {
      assert_different_registers(_ref, _ref_addr.base(), R0, noreg);
      assert(!_ref_addr.index()->is_valid(), "reference addresses must not contain an index component");

      if (_ref != R4_ARG2) {
        // Calculate address first as the address' base register might clash with R4_ARG2
        __ add(R4_ARG2, (intptr_t) _ref_addr.disp(), _ref_addr.base());
        __ mr_if_needed(R3_ARG1, _ref);
      } else if (_ref_addr.base() != R3_ARG1) {
        __ mr(R3_ARG1, _ref);
        __ add(R4_ARG2, (intptr_t) _ref_addr.disp(), _ref_addr.base()); // Clobbering _ref
      } else {
        // Arguments are provided in inverse order (i.e. _ref == R4_ARG2, _ref_addr == R3_ARG1)
        __ mr(R0, _ref);
        __ add(R4_ARG2, (intptr_t) _ref_addr.disp(), _ref_addr.base());
        __ mr(R3_ARG1, R0);
      }
    }
  }
};

#undef __
#define __ masm->

void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
  __ block_comment("generate_c2_load_barrier_stub (zgc) {");

  __ bind(*stub->entry());

  Register ref = stub->ref();
  Address ref_addr = stub->ref_addr();

  assert_different_registers(ref, ref_addr.base());

  {
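    // RAII helpers: live registers are saved and the argument registers are set
    // up on construction; the registers are restored again when the scope is left.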
    ZSaveLiveRegisters save_live_registers(masm, stub);
    ZSetupArguments setup_arguments(masm, stub);

    __ call_VM_leaf(stub->slow_path());
    __ mr_if_needed(ref, R3_RET);
  }

  __ b(*stub->continuation());

  __ block_comment("} generate_c2_load_barrier_stub (zgc)");
}

#undef __
#endif // COMPILER2