Path: blob/master/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp
40948 views
/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif

#define __ masm->

// Emit the G1 SATB pre-barrier for a reference array copy.
// If concurrent marking is active, spills the live registers (from/to/count
// plus up to two extra registers the caller needs preserved), then calls the
// matching pre-barrier runtime entry for [to, to + count).
// Generates nothing when the destination is statically known to be uninitialized.
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                            Register from, Register to, Register count,
                                                            Register preserve1, Register preserve2) {
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
  // With G1, don't generate the call if we statically know that the target is uninitialized
  if (!dest_uninitialized) {
    int spill_slots = 3;
    if (preserve1 != noreg) { spill_slots++; }
    if (preserve2 != noreg) { spill_slots++; }
    const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
    Label filtered;

    // Is marking active?
    if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
      __ lwz(R0, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), R16_thread);
    } else {
      guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
      __ lbz(R0, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), R16_thread);
    }
    __ cmpdi(CCR0, R0, 0);
    __ beq(CCR0, filtered);

    // Marking is active: preserve the live registers across the C call.
    __ save_LR_CR(R0);
    __ push_frame(frame_size, R0);
    int slot_nr = 0;
    __ std(from, frame_size - (++slot_nr) * wordSize, R1_SP);
    __ std(to, frame_size - (++slot_nr) * wordSize, R1_SP);
    __ std(count, frame_size - (++slot_nr) * wordSize, R1_SP);
    if (preserve1 != noreg) { __ std(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
    if (preserve2 != noreg) { __ std(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }

    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), to, count);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), to, count);
    }

    // Reload the preserved registers in the same slot order used for the spill.
    slot_nr = 0;
    __ ld(from, frame_size - (++slot_nr) * wordSize, R1_SP);
    __ ld(to, frame_size - (++slot_nr) * wordSize, R1_SP);
    __ ld(count, frame_size - (++slot_nr) * wordSize, R1_SP);
    if (preserve1 != noreg) { __ ld(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
    if (preserve2 != noreg) { __ ld(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
    __ addi(R1_SP, R1_SP, frame_size); // pop_frame()
    __ restore_LR_CR(R0);

    __ bind(filtered);
  }
}

// Emit the G1 post-barrier for a reference array copy: unconditionally calls
// write_ref_array_post_entry for [addr, addr + count), preserving one optional
// caller register across the call.
void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                             Register addr, Register count, Register preserve) {
  int spill_slots = (preserve != noreg) ? 1 : 0;
  const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);

  __ save_LR_CR(R0);
  __ push_frame(frame_size, R0);
  if (preserve != noreg) { __ std(preserve, frame_size - 1 * wordSize, R1_SP); }
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry), addr, count);
  if (preserve != noreg) { __ ld(preserve, frame_size - 1 * wordSize, R1_SP); }
  __ addi(R1_SP, R1_SP, frame_size); // pop_frame();
  __ restore_LR_CR(R0);
}

// Emit the G1 SATB pre-barrier for a single field store.
// If obj == noreg the previous value has been preloaded into pre_val by the
// caller; otherwise it is loaded from (obj + ind_or_offs). When marking is
// active and the previous value is non-null, it is logged into the thread's
// SATB buffer, falling back to write_ref_field_pre_entry when the buffer is full.
void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, DecoratorSet decorators,
                                                 Register obj, RegisterOrConstant ind_or_offs, Register pre_val,
                                                 Register tmp1, Register tmp2,
                                                 MacroAssembler::PreservationLevel preservation_level) {
  bool not_null = (decorators & IS_NOT_NULL) != 0,
       preloaded = obj == noreg;
  Register nv_save = noreg;

  if (preloaded) {
    // We are not loading the previous value so make
    // sure that we don't trash the value in pre_val
    // with the code below.
    assert_different_registers(pre_val, tmp1, tmp2);
    if (pre_val->is_volatile()) {
      // Pick a non-volatile temp so pre_val survives the runtime C call below.
      nv_save = !tmp1->is_volatile() ? tmp1 : tmp2;
      assert(!nv_save->is_volatile(), "need one nv temp register if pre_val lives in volatile register");
    }
  }

  Label runtime, filtered;

  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ lwz(tmp1, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), R16_thread);
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ lbz(tmp1, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), R16_thread);
  }
  __ cmpdi(CCR0, tmp1, 0);
  __ beq(CCR0, filtered);

  // Do we need to load the previous value?
  if (!preloaded) {
    // Load the previous value...
    if (UseCompressedOops) {
      __ lwz(pre_val, ind_or_offs, obj);
    } else {
      __ ld(pre_val, ind_or_offs, obj);
    }
    // Previous value has been loaded into Rpre_val.
  }
  assert(pre_val != noreg, "must have a real register");

  // Is the previous value null?
  if (preloaded && not_null) {
#ifdef ASSERT
    __ cmpdi(CCR0, pre_val, 0);
    __ asm_assert_ne("null oop not allowed (G1 pre)"); // Checked by caller.
#endif
  } else {
    __ cmpdi(CCR0, pre_val, 0);
    __ beq(CCR0, filtered);
  }

  if (!preloaded && UseCompressedOops) {
    __ decode_heap_oop_not_null(pre_val);
  }

  // OK, it's not filtered, so we'll need to call enqueue. In the normal
  // case, pre_val will be a scratch G-reg, but there are some cases in
  // which it's an O-reg. In the first case, do a normal call. In the
  // latter, do a save here and call the frameless version.

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)
  const Register Rbuffer = tmp1, Rindex = tmp2;

  __ ld(Rindex, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()), R16_thread);
  __ cmpdi(CCR0, Rindex, 0);
  __ beq(CCR0, runtime); // If index == 0, goto runtime.
  __ ld(Rbuffer, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()), R16_thread);

  __ addi(Rindex, Rindex, -wordSize); // Decrement index.
  __ std(Rindex, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()), R16_thread);

  // Record the previous value.
  __ stdx(pre_val, Rbuffer, Rindex);
  __ b(filtered);

  __ bind(runtime);

  // Determine necessary runtime invocation preservation measures
  const bool needs_frame = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR;
  assert(preservation_level <= MacroAssembler::PRESERVATION_FRAME_LR,
         "g1_write_barrier_pre doesn't support preservation levels higher than PRESERVATION_FRAME_LR");

  // May need to preserve LR. Also needed if current frame is not compatible with C calling convention.
  if (needs_frame) {
    __ save_LR_CR(tmp1);
    __ push_frame_reg_args(0, tmp2);
  }

  if (pre_val->is_volatile() && preloaded) { __ mr(nv_save, pre_val); } // Save pre_val across C call if it was preloaded.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, R16_thread);
  if (pre_val->is_volatile() && preloaded) { __ mr(pre_val, nv_save); } // restore

  if (needs_frame) {
    __ pop_frame();
    __ restore_LR_CR(tmp1);
  }

  __ bind(filtered);
}

// Emit the G1 post-barrier for a single field store: filters same-region
// stores, null stores, young cards and already-dirty cards, then dirties the
// card and enqueues its address in the thread's dirty card queue, falling back
// to write_ref_field_post_entry when the queue buffer is full.
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, DecoratorSet decorators,
                                                  Register store_addr, Register new_val,
                                                  Register tmp1, Register tmp2, Register tmp3,
                                                  MacroAssembler::PreservationLevel preservation_level) {
  bool not_null = (decorators & IS_NOT_NULL) != 0;

  Label runtime, filtered;
  assert_different_registers(store_addr, new_val, tmp1, tmp2);

  CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());

  // Does store cross heap regions?
  __ xorr(tmp1, store_addr, new_val);
  __ srdi_(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
  __ beq(CCR0, filtered);

  // Crosses regions, storing NULL?
  if (not_null) {
#ifdef ASSERT
    __ cmpdi(CCR0, new_val, 0);
    __ asm_assert_ne("null oop not allowed (G1 post)"); // Checked by caller.
#endif
  } else {
    __ cmpdi(CCR0, new_val, 0);
    __ beq(CCR0, filtered);
  }

  // Storing region crossing non-NULL, is card already dirty?
  const Register Rcard_addr = tmp1;
  Register Rbase = tmp2;
  __ load_const_optimized(Rbase, (address)(ct->card_table()->byte_map_base()), /*temp*/ tmp3);

  __ srdi(Rcard_addr, store_addr, CardTable::card_shift);

  // Get the address of the card.
  __ lbzx(/*card value*/ tmp3, Rbase, Rcard_addr);
  __ cmpwi(CCR0, tmp3, (int)G1CardTable::g1_young_card_val());
  __ beq(CCR0, filtered);

  __ membar(Assembler::StoreLoad);
  __ lbzx(/*card value*/ tmp3, Rbase, Rcard_addr); // Reload after membar.
  __ cmpwi(CCR0, tmp3 /* card value */, (int)G1CardTable::dirty_card_val());
  __ beq(CCR0, filtered);

  // Storing a region crossing, non-NULL oop, card is clean.
  // Dirty card and log.
  __ li(tmp3, (int)G1CardTable::dirty_card_val());
  //release(); // G1: oops are allowed to get visible after dirty marking.
  __ stbx(tmp3, Rbase, Rcard_addr);

  __ add(Rcard_addr, Rbase, Rcard_addr); // This is the address which needs to get enqueued.
  Rbase = noreg; // end of lifetime

  const Register Rqueue_index = tmp2,
                 Rqueue_buf   = tmp3;
  __ ld(Rqueue_index, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()), R16_thread);
  __ cmpdi(CCR0, Rqueue_index, 0);
  __ beq(CCR0, runtime); // index == 0 then jump to runtime
  __ ld(Rqueue_buf, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()), R16_thread);

  __ addi(Rqueue_index, Rqueue_index, -wordSize); // decrement index
  __ std(Rqueue_index, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()), R16_thread);

  __ stdx(Rcard_addr, Rqueue_buf, Rqueue_index); // store card
  __ b(filtered);

  __ bind(runtime);

  assert(preservation_level == MacroAssembler::PRESERVATION_NONE,
         "g1_write_barrier_post doesn't support preservation levels higher than PRESERVATION_NONE");

  // Save the live input values.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), Rcard_addr, R16_thread);

  __ bind(filtered);
}

// Full oop store: pre-barrier, the store itself, then (for non-null values)
// the post-barrier. For precise (array / unknown-ref) stores, base is advanced
// to the exact element address before the post-barrier; note this clobbers base.
void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         Register base, RegisterOrConstant ind_or_offs, Register val,
                                         Register tmp1, Register tmp2, Register tmp3,
                                         MacroAssembler::PreservationLevel preservation_level) {
  bool is_array = (decorators & IS_ARRAY) != 0;
  bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool precise = is_array || on_anonymous;
  // Load and record the previous value.
  g1_write_barrier_pre(masm, decorators,
                       base, ind_or_offs,
                       tmp1, tmp2, tmp3,
                       preservation_level);

  BarrierSetAssembler::store_at(masm, decorators,
                                type, base, ind_or_offs, val,
                                tmp1, tmp2, tmp3,
                                preservation_level);

  // No need for post barrier if storing NULL
  if (val != noreg) {
    if (precise) {
      if (ind_or_offs.is_constant()) {
        __ add_const_optimized(base, base, ind_or_offs.as_constant(), tmp1);
      } else {
        __ add(base, ind_or_offs.as_register(), base);
      }
    }
    g1_write_barrier_post(masm, decorators,
                          base, val,
                          tmp1, tmp2, tmp3,
                          preservation_level);
  }
}

// Oop/primitive load; for Reference.referent-style loads (weak/phantom refs)
// additionally logs the loaded oop via the SATB pre-barrier so the collector
// treats it as live.
void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    Register base, RegisterOrConstant ind_or_offs, Register dst,
                                    Register tmp1, Register tmp2,
                                    MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null) {
  bool on_oop = is_reference_type(type);
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;
  Label done;
  if (on_oop && on_reference && L_handle_null == NULL) { L_handle_null = &done; }
  // Load the value of the referent field.
  ModRefBarrierSetAssembler::load_at(masm, decorators, type,
                                     base, ind_or_offs, dst,
                                     tmp1, tmp2,
                                     preservation_level, L_handle_null);
  if (on_oop && on_reference) {
    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer. Note with
    // these parameters the pre-barrier does not generate
    // the load of the previous value
    // We only reach here if value is not null.
    g1_write_barrier_pre(masm, decorators | IS_NOT_NULL,
                         noreg /* obj */, (intptr_t)0, dst /* pre_val */,
                         tmp1, tmp2,
                         preservation_level);
  }
  __ bind(done);
}

// Resolve a jobject in-place: NULL passes through, strong handles are simply
// dereferenced, and jweak-tagged handles additionally run the SATB pre-barrier
// on the resolved oop (the andi_ sets CR0 for the jweak-tag test below).
void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value,
                                            Register tmp1, Register tmp2,
                                            MacroAssembler::PreservationLevel preservation_level) {
  Label done, not_weak;
  __ cmpdi(CCR0, value, 0);
  __ beq(CCR0, done); // Use NULL as-is.

  __ clrrdi(tmp1, value, JNIHandles::weak_tag_size);
  __ andi_(tmp2, value, JNIHandles::weak_tag_mask);
  __ ld(value, 0, tmp1); // Resolve (untagged) jobject.

  __ beq(CCR0, not_weak); // Test for jweak tag.
  __ verify_oop(value, FILE_AND_LINE);
  g1_write_barrier_pre(masm, IN_NATIVE | ON_PHANTOM_OOP_REF,
                       noreg, noreg, value,
                       tmp1, tmp2,
                       preservation_level);
  __ bind(not_weak);
  __ verify_oop(value, FILE_AND_LINE);
  __ bind(done);
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

// C1 slow-path stub for the SATB pre-barrier: loads pre_val if requested,
// filters null, then tail-calls the shared pre-barrier runtime blob with
// pre_val passed on the stack at -8(SP).
void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");
  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }

  __ cmpdi(CCR0, pre_val_reg, 0);
  __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());

  address c_code = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
  //__ load_const_optimized(R0, c_code);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(c_code));
  __ std(pre_val_reg, -8, R1_SP); // Pass pre_val on stack.
  __ mtctr(R0);
  __ bctrl();
  __ b(*stub->continuation());
}

// C1 slow-path stub for the card-table post-barrier: filters null new values,
// then calls the shared post-barrier runtime blob with the store address in R0.
void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  assert(stub->addr()->is_register(), "Precondition.");
  assert(stub->new_val()->is_register(), "Precondition.");
  Register addr_reg = stub->addr()->as_pointer_register();
  Register new_val_reg = stub->new_val()->as_register();

  __ cmpdi(CCR0, new_val_reg, 0);
  __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());

  address c_code = bs->post_barrier_c1_runtime_code_blob()->code_begin();
  //__ load_const_optimized(R0, c_code);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(c_code));
  __ mtctr(R0);
  __ mr(R0, addr_reg); // Pass addr in R0.
  __ bctrl();
  __ b(*stub->continuation());
}

#undef __
#define __ sasm->

// Shared C1 runtime blob for the SATB pre-barrier slow path.
// Caller pushed pre_val at -8(SP); tmp/tmp2 are spilled below SP, so this runs
// frameless until the queue needs refilling (then all volatiles are saved
// around the C call).
void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  BarrierSet* bs = BarrierSet::barrier_set();

  __ set_info("g1_pre_barrier_slow_id", false);

  // Using stack slots: pre_val (pre-pushed), spill tmp, spill tmp2.
  const int stack_slots = 3;
  Register pre_val = R0; // previous value of memory
  Register tmp = R14;
  Register tmp2 = R15;

  Label refill, restart, marking_not_active;
  int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());

  // Spill
  __ std(tmp, -16, R1_SP);
  __ std(tmp2, -24, R1_SP);

  // Is marking still active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ lwz(tmp, satb_q_active_byte_offset, R16_thread);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ lbz(tmp, satb_q_active_byte_offset, R16_thread);
  }
  __ cmpdi(CCR0, tmp, 0);
  __ beq(CCR0, marking_not_active);

  __ bind(restart);
  // Load the index into the SATB buffer. SATBMarkQueue::_index is a
  // size_t so ld_ptr is appropriate.
  __ ld(tmp, satb_q_index_byte_offset, R16_thread);

  // index == 0?
  __ cmpdi(CCR0, tmp, 0);
  __ beq(CCR0, refill);

  __ ld(tmp2, satb_q_buf_byte_offset, R16_thread);
  __ ld(pre_val, -8, R1_SP); // Load from stack.
  __ addi(tmp, tmp, -oopSize);

  __ std(tmp, satb_q_index_byte_offset, R16_thread);
  __ stdx(pre_val, tmp2, tmp); // [_buf + index] := <address_of_card>

  __ bind(marking_not_active);
  // Restore temp registers and return-from-leaf.
  __ ld(tmp2, -24, R1_SP);
  __ ld(tmp, -16, R1_SP);
  __ blr();

  __ bind(refill);
  const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
  __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
  __ mflr(R0);
  __ std(R0, _abi0(lr), R1_SP);
  __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1SATBMarkQueueSet::handle_zero_index_for_thread), R16_thread);
  __ pop_frame();
  __ ld(R0, _abi0(lr), R1_SP);
  __ mtlr(R0);
  __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
  __ b(restart);
}

// Shared C1 runtime blob for the card-table post-barrier slow path.
// Store address is passed in R0; addr/tmp2 are spilled below SP. Dirties the
// card (unless young/already dirty) and enqueues it in the dirty card queue,
// refilling the queue via a C call when full.
void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
  G1BarrierSet* bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());

  __ set_info("g1_post_barrier_slow_id", false);

  // Using stack slots: spill addr, spill tmp2
  const int stack_slots = 2;
  Register tmp = R0;
  Register addr = R14;
  Register tmp2 = R15;
  CardTable::CardValue* byte_map_base = bs->card_table()->byte_map_base();

  Label restart, refill, ret;

  // Spill
  __ std(addr, -8, R1_SP);
  __ std(tmp2, -16, R1_SP);

  __ srdi(addr, R0, CardTable::card_shift); // Addr is passed in R0.
  __ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
  __ add(addr, tmp2, addr);
  __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]

  // Return if young card.
  __ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
  __ beq(CCR0, ret);

  // Return if sequential consistent value is already dirty.
  __ membar(Assembler::StoreLoad);
  __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]

  __ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
  __ beq(CCR0, ret);

  // Not dirty.

  // First, dirty it.
  __ li(tmp, G1CardTable::dirty_card_val());
  __ stb(tmp, 0, addr);

  int dirty_card_q_index_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
  int dirty_card_q_buf_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());

  __ bind(restart);

  // Get the index into the update buffer. G1DirtyCardQueue::_index is
  // a size_t so ld_ptr is appropriate here.
  __ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);

  // index == 0?
  __ cmpdi(CCR0, tmp2, 0);
  __ beq(CCR0, refill);

  __ ld(tmp, dirty_card_q_buf_byte_offset, R16_thread);
  __ addi(tmp2, tmp2, -oopSize);

  __ std(tmp2, dirty_card_q_index_byte_offset, R16_thread);
  __ add(tmp2, tmp, tmp2);
  __ std(addr, 0, tmp2); // [_buf + index] := <address_of_card>

  // Restore temp registers and return-from-leaf.
  __ bind(ret);
  __ ld(tmp2, -16, R1_SP);
  __ ld(addr, -8, R1_SP);
  __ blr();

  __ bind(refill);
  const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
  __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
  __ mflr(R0);
  __ std(R0, _abi0(lr), R1_SP);
  __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1DirtyCardQueueSet::handle_zero_index_for_thread), R16_thread);
  __ pop_frame();
  __ ld(R0, _abi0(lr), R1_SP);
  __ mtlr(R0);
  __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
  __ b(restart);
}

#undef __

#endif // COMPILER1