Path: blob/master/src/hotspot/cpu/s390/macroAssembler_s390.cpp
40930 views
/*1* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.2* Copyright (c) 2016, 2019 SAP SE. All rights reserved.3* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.4*5* This code is free software; you can redistribute it and/or modify it6* under the terms of the GNU General Public License version 2 only, as7* published by the Free Software Foundation.8*9* This code is distributed in the hope that it will be useful, but WITHOUT10* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or11* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License12* version 2 for more details (a copy is included in the LICENSE file that13* accompanied this code).14*15* You should have received a copy of the GNU General Public License version16* 2 along with this work; if not, write to the Free Software Foundation,17* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.18*19* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA20* or visit www.oracle.com if you need additional information or have any21* questions.22*23*/2425#include "precompiled.hpp"26#include "asm/codeBuffer.hpp"27#include "asm/macroAssembler.inline.hpp"28#include "compiler/disassembler.hpp"29#include "gc/shared/barrierSet.hpp"30#include "gc/shared/barrierSetAssembler.hpp"31#include "gc/shared/collectedHeap.inline.hpp"32#include "interpreter/interpreter.hpp"33#include "gc/shared/cardTableBarrierSet.hpp"34#include "memory/resourceArea.hpp"35#include "memory/universe.hpp"36#include "oops/accessDecorators.hpp"37#include "oops/compressedOops.inline.hpp"38#include "oops/klass.inline.hpp"39#include "prims/methodHandles.hpp"40#include "registerSaver_s390.hpp"41#include "runtime/biasedLocking.hpp"42#include "runtime/icache.hpp"43#include "runtime/interfaceSupport.inline.hpp"44#include "runtime/objectMonitor.hpp"45#include "runtime/os.hpp"46#include "runtime/safepoint.hpp"47#include "runtime/safepointMechanism.hpp"48#include "runtime/sharedRuntime.hpp"49#include "runtime/stubRoutines.hpp"50#include "utilities/events.hpp"51#include "utilities/macros.hpp"52#include "utilities/powerOfTwo.hpp"5354#include <ucontext.h>5556#define BLOCK_COMMENT(str) block_comment(str)57#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")5859// Move 32-bit register if destination and source are different.60void MacroAssembler::lr_if_needed(Register rd, Register rs) {61if (rs != rd) { z_lr(rd, rs); }62}6364// Move register if destination and source are different.65void MacroAssembler::lgr_if_needed(Register rd, Register rs) {66if (rs != rd) { z_lgr(rd, rs); }67}6869// Zero-extend 32-bit register into 64-bit register if destination and source are different.70void MacroAssembler::llgfr_if_needed(Register rd, Register rs) {71if (rs != rd) { z_llgfr(rd, rs); }72}7374// Move float register if destination and source are different.75void MacroAssembler::ldr_if_needed(FloatRegister rd, FloatRegister rs) {76if (rs != rd) { z_ldr(rd, rs); }77}7879// Move integer register if destination and source are different.80// It is assumed that shorter-than-int types are already81// appropriately sign-extended.82void MacroAssembler::move_reg_if_needed(Register dst, BasicType dst_type, Register src,83BasicType src_type) {84assert((dst_type != T_FLOAT) && (dst_type != T_DOUBLE), "use move_freg for float types");85assert((src_type != T_FLOAT) && (src_type != T_DOUBLE), "use move_freg for float types");8687if (dst_type == src_type) {88lgr_if_needed(dst, src); // Just move all 64 bits.89return;90}9192switch (dst_type) {93// Do not support these types for now.94// case T_BOOLEAN:95case T_BYTE: // signed byte96switch (src_type) {97case T_INT:98z_lgbr(dst, src);99break;100default:101ShouldNotReachHere();102}103return;104105case T_CHAR:106case T_SHORT:107switch (src_type) {108case T_INT:109if (dst_type == T_CHAR) {110z_llghr(dst, src);111} else {112z_lghr(dst, src);113}114break;115default:116ShouldNotReachHere();117}118return;119120case T_INT:121switch (src_type) {122case T_BOOLEAN:123case T_BYTE:124case T_CHAR:125case T_SHORT:126case T_INT:127case T_LONG:128case T_OBJECT:129case T_ARRAY:130case T_VOID:131case T_ADDRESS:132lr_if_needed(dst, src);133// llgfr_if_needed(dst, src); // zero-extend (in case we need to find a bug).134return;135136default:137assert(false, "non-integer src type");138return;139}140case T_LONG:141switch (src_type) {142case T_BOOLEAN:143case T_BYTE:144case T_CHAR:145case T_SHORT:146case T_INT:147z_lgfr(dst, src); // sign extension148return;149150case T_LONG:151case T_OBJECT:152case T_ARRAY:153case T_VOID:154case T_ADDRESS:155lgr_if_needed(dst, src);156return;157158default:159assert(false, "non-integer src type");160return;161}162return;163case T_OBJECT:164case T_ARRAY:165case T_VOID:166case T_ADDRESS:167switch (src_type) {168// These types don't make sense to be converted to pointers:169// case T_BOOLEAN:170// case T_BYTE:171// case T_CHAR:172// case T_SHORT:173174case T_INT:175z_llgfr(dst, src); // zero extension176return;177178case T_LONG:179case T_OBJECT:180case T_ARRAY:181case T_VOID:182case T_ADDRESS:183lgr_if_needed(dst, src);184return;185186default:187assert(false, "non-integer src type");188return;189}190return;191default:192assert(false, "non-integer dst type");193return;194}195}196197// Move float register if destination and source are different.198void MacroAssembler::move_freg_if_needed(FloatRegister dst, BasicType dst_type,199FloatRegister src, BasicType src_type) {200assert((dst_type == T_FLOAT) || (dst_type == T_DOUBLE), "use move_reg for int types");201assert((src_type == T_FLOAT) || (src_type == T_DOUBLE), "use move_reg for int types");202if (dst_type == src_type) {203ldr_if_needed(dst, src); // Just move all 64 bits.204} else {205switch (dst_type) {206case T_FLOAT:207assert(src_type == T_DOUBLE, "invalid float type combination");208z_ledbr(dst, src);209return;210case T_DOUBLE:211assert(src_type == T_FLOAT, "invalid float type combination");212z_ldebr(dst, src);213return;214default:215assert(false, "non-float dst type");216return;217}218}219}220221// Optimized emitter for reg to mem operations.222// Uses modern instructions if running on modern hardware, classic instructions223// otherwise. Prefers (usually shorter) classic instructions if applicable.224// Data register (reg) cannot be used as work register.225//226// Don't rely on register locking, instead pass a scratch register (Z_R0 by default).227// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!228void MacroAssembler::freg2mem_opt(FloatRegister reg,229int64_t disp,230Register index,231Register base,232void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),233void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),234Register scratch) {235index = (index == noreg) ? Z_R0 : index;236if (Displacement::is_shortDisp(disp)) {237(this->*classic)(reg, disp, index, base);238} else {239if (Displacement::is_validDisp(disp)) {240(this->*modern)(reg, disp, index, base);241} else {242if (scratch != Z_R0 && scratch != Z_R1) {243(this->*modern)(reg, disp, index, base); // Will fail with disp out of range.244} else {245if (scratch != Z_R0) { // scratch == Z_R1246if ((scratch == index) || (index == base)) {247(this->*modern)(reg, disp, index, base); // Will fail with disp out of range.248} else {249add2reg(scratch, disp, base);250(this->*classic)(reg, 0, index, scratch);251if (base == scratch) {252add2reg(base, -disp); // Restore base.253}254}255} else { // scratch == Z_R0256z_lgr(scratch, base);257add2reg(base, disp);258(this->*classic)(reg, 0, index, base);259z_lgr(base, scratch); // Restore base.260}261}262}263}264}265266void MacroAssembler::freg2mem_opt(FloatRegister reg, const Address &a, bool is_double) {267if (is_double) {268freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stdy), CLASSIC_FFUN(z_std));269} else {270freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stey), CLASSIC_FFUN(z_ste));271}272}273274// Optimized emitter for mem to reg operations.275// Uses modern instructions if running on modern hardware, classic instructions276// otherwise. Prefers (usually shorter) classic instructions if applicable.277// data register (reg) cannot be used as work register.278//279// Don't rely on register locking, instead pass a scratch register (Z_R0 by default).280// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!281void MacroAssembler::mem2freg_opt(FloatRegister reg,282int64_t disp,283Register index,284Register base,285void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),286void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),287Register scratch) {288index = (index == noreg) ? Z_R0 : index;289if (Displacement::is_shortDisp(disp)) {290(this->*classic)(reg, disp, index, base);291} else {292if (Displacement::is_validDisp(disp)) {293(this->*modern)(reg, disp, index, base);294} else {295if (scratch != Z_R0 && scratch != Z_R1) {296(this->*modern)(reg, disp, index, base); // Will fail with disp out of range.297} else {298if (scratch != Z_R0) { // scratch == Z_R1299if ((scratch == index) || (index == base)) {300(this->*modern)(reg, disp, index, base); // Will fail with disp out of range.301} else {302add2reg(scratch, disp, base);303(this->*classic)(reg, 0, index, scratch);304if (base == scratch) {305add2reg(base, -disp); // Restore base.306}307}308} else { // scratch == Z_R0309z_lgr(scratch, base);310add2reg(base, disp);311(this->*classic)(reg, 0, index, base);312z_lgr(base, scratch); // Restore base.313}314}315}316}317}318319void MacroAssembler::mem2freg_opt(FloatRegister reg, const Address &a, bool is_double) {320if (is_double) {321mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ldy), CLASSIC_FFUN(z_ld));322} else {323mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ley), CLASSIC_FFUN(z_le));324}325}326327// Optimized emitter for reg to mem operations.328// Uses modern instructions if running on modern hardware, classic instructions329// otherwise. Prefers (usually shorter) classic instructions if applicable.330// Data register (reg) cannot be used as work register.331//332// Don't rely on register locking, instead pass a scratch register333// (Z_R0 by default)334// CAUTION! passing registers >= Z_R2 may produce bad results on old CPUs!335void MacroAssembler::reg2mem_opt(Register reg,336int64_t disp,337Register index,338Register base,339void (MacroAssembler::*modern) (Register, int64_t, Register, Register),340void (MacroAssembler::*classic)(Register, int64_t, Register, Register),341Register scratch) {342index = (index == noreg) ? Z_R0 : index;343if (Displacement::is_shortDisp(disp)) {344(this->*classic)(reg, disp, index, base);345} else {346if (Displacement::is_validDisp(disp)) {347(this->*modern)(reg, disp, index, base);348} else {349if (scratch != Z_R0 && scratch != Z_R1) {350(this->*modern)(reg, disp, index, base); // Will fail with disp out of range.351} else {352if (scratch != Z_R0) { // scratch == Z_R1353if ((scratch == index) || (index == base)) {354(this->*modern)(reg, disp, index, base); // Will fail with disp out of range.355} else {356add2reg(scratch, disp, base);357(this->*classic)(reg, 0, index, scratch);358if (base == scratch) {359add2reg(base, -disp); // Restore base.360}361}362} else { // scratch == Z_R0363if ((scratch == reg) || (scratch == base) || (reg == base)) {364(this->*modern)(reg, disp, index, base); // Will fail with disp out of range.365} else {366z_lgr(scratch, base);367add2reg(base, disp);368(this->*classic)(reg, 0, index, base);369z_lgr(base, scratch); // Restore base.370}371}372}373}374}375}376377int MacroAssembler::reg2mem_opt(Register reg, const Address &a, bool is_double) {378int store_offset = offset();379if (is_double) {380reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_stg), CLASSIC_IFUN(z_stg));381} else {382reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_sty), CLASSIC_IFUN(z_st));383}384return store_offset;385}386387// Optimized emitter for mem to reg operations.388// Uses modern instructions if running on modern hardware, classic instructions389// otherwise. Prefers (usually shorter) classic instructions if applicable.390// Data register (reg) will be used as work register where possible.391void MacroAssembler::mem2reg_opt(Register reg,392int64_t disp,393Register index,394Register base,395void (MacroAssembler::*modern) (Register, int64_t, Register, Register),396void (MacroAssembler::*classic)(Register, int64_t, Register, Register)) {397index = (index == noreg) ? Z_R0 : index;398if (Displacement::is_shortDisp(disp)) {399(this->*classic)(reg, disp, index, base);400} else {401if (Displacement::is_validDisp(disp)) {402(this->*modern)(reg, disp, index, base);403} else {404if ((reg == index) && (reg == base)) {405z_sllg(reg, reg, 1);406add2reg(reg, disp);407(this->*classic)(reg, 0, noreg, reg);408} else if ((reg == index) && (reg != Z_R0)) {409add2reg(reg, disp);410(this->*classic)(reg, 0, reg, base);411} else if (reg == base) {412add2reg(reg, disp);413(this->*classic)(reg, 0, index, reg);414} else if (reg != Z_R0) {415add2reg(reg, disp, base);416(this->*classic)(reg, 0, index, reg);417} else { // reg == Z_R0 && reg != base here418add2reg(base, disp);419(this->*classic)(reg, 0, index, base);420add2reg(base, -disp);421}422}423}424}425426void MacroAssembler::mem2reg_opt(Register reg, const Address &a, bool is_double) {427if (is_double) {428z_lg(reg, a);429} else {430mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_ly), CLASSIC_IFUN(z_l));431}432}433434void MacroAssembler::mem2reg_signed_opt(Register reg, const Address &a) {435mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_lgf), CLASSIC_IFUN(z_lgf));436}437438void MacroAssembler::and_imm(Register r, long mask,439Register tmp /* = Z_R0 */,440bool wide /* = false */) {441assert(wide || Immediate::is_simm32(mask), "mask value too large");442443if (!wide) {444z_nilf(r, mask);445return;446}447448assert(r != tmp, " need a different temporary register !");449load_const_optimized(tmp, mask);450z_ngr(r, tmp);451}452453// Calculate the 1's complement.454// Note: The condition code is neither preserved nor correctly set by this code!!!455// Note: (wide == false) does not protect the high order half of the target register456// from alteration. It only serves as optimization hint for 32-bit results.457void MacroAssembler::not_(Register r1, Register r2, bool wide) {458459if ((r2 == noreg) || (r2 == r1)) { // Calc 1's complement in place.460z_xilf(r1, -1);461if (wide) {462z_xihf(r1, -1);463}464} else { // Distinct src and dst registers.465load_const_optimized(r1, -1);466z_xgr(r1, r2);467}468}469470unsigned long MacroAssembler::create_mask(int lBitPos, int rBitPos) {471assert(lBitPos >= 0, "zero is leftmost bit position");472assert(rBitPos <= 63, "63 is rightmost bit position");473assert(lBitPos <= rBitPos, "inverted selection interval");474return (lBitPos == 0 ? (unsigned long)(-1L) : ((1UL<<(63-lBitPos+1))-1)) & (~((1UL<<(63-rBitPos))-1));475}476477// Helper function for the "Rotate_then_<logicalOP>" emitters.478// Rotate src, then mask register contents such that only bits in range survive.479// For oneBits == false, all bits not in range are set to 0. Useful for deleting all bits outside range.480// For oneBits == true, all bits not in range are set to 1. Useful for preserving all bits outside range.481// The caller must ensure that the selected range only contains bits with defined value.482void MacroAssembler::rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,483int nRotate, bool src32bit, bool dst32bit, bool oneBits) {484assert(!(dst32bit && lBitPos < 32), "selection interval out of range for int destination");485bool sll4rll = (nRotate >= 0) && (nRotate <= (63-rBitPos)); // Substitute SLL(G) for RLL(G).486bool srl4rll = (nRotate < 0) && (-nRotate <= lBitPos); // Substitute SRL(G) for RLL(G).487// Pre-determine which parts of dst will be zero after shift/rotate.488bool llZero = sll4rll && (nRotate >= 16);489bool lhZero = (sll4rll && (nRotate >= 32)) || (srl4rll && (nRotate <= -48));490bool lfZero = llZero && lhZero;491bool hlZero = (sll4rll && (nRotate >= 48)) || (srl4rll && (nRotate <= -32));492bool hhZero = (srl4rll && (nRotate <= -16));493bool hfZero = hlZero && hhZero;494495// rotate then mask src operand.496// if oneBits == true, all bits outside selected range are 1s.497// if oneBits == false, all bits outside selected range are 0s.498if (src32bit) { // There might be garbage in the upper 32 bits which will get masked away.499if (dst32bit) {500z_rll(dst, src, nRotate); // Copy and rotate, upper half of reg remains undisturbed.501} else {502if (sll4rll) { z_sllg(dst, src, nRotate); }503else if (srl4rll) { z_srlg(dst, src, -nRotate); }504else { z_rllg(dst, src, nRotate); }505}506} else {507if (sll4rll) { z_sllg(dst, src, nRotate); }508else if (srl4rll) { z_srlg(dst, src, -nRotate); }509else { z_rllg(dst, src, nRotate); }510}511512unsigned long range_mask = create_mask(lBitPos, rBitPos);513unsigned int range_mask_h = (unsigned int)(range_mask >> 32);514unsigned int range_mask_l = (unsigned int)range_mask;515unsigned short range_mask_hh = (unsigned short)(range_mask >> 48);516unsigned short range_mask_hl = (unsigned short)(range_mask >> 32);517unsigned short range_mask_lh = (unsigned short)(range_mask >> 16);518unsigned short range_mask_ll = (unsigned short)range_mask;519// Works for z9 and newer H/W.520if (oneBits) {521if ((~range_mask_l) != 0) { z_oilf(dst, ~range_mask_l); } // All bits outside range become 1s.522if (((~range_mask_h) != 0) && !dst32bit) { z_oihf(dst, ~range_mask_h); }523} else {524// All bits outside range become 0s525if (((~range_mask_l) != 0) && !lfZero) {526z_nilf(dst, range_mask_l);527}528if (((~range_mask_h) != 0) && !dst32bit && !hfZero) {529z_nihf(dst, range_mask_h);530}531}532}533534// Rotate src, then insert selected range from rotated src into dst.535// Clear dst before, if requested.536void MacroAssembler::rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos,537int nRotate, bool clear_dst) {538// This version does not depend on src being zero-extended int2long.539nRotate &= 0x003f; // For risbg, pretend it's an unsigned value.540z_risbg(dst, src, lBitPos, rBitPos, nRotate, clear_dst); // Rotate, then insert selected, clear the rest.541}542543// Rotate src, then and selected range from rotated src into dst.544// Set condition code only if so requested. Otherwise it is unpredictable.545// See performance note in macroAssembler_s390.hpp for important information.546void MacroAssembler::rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos,547int nRotate, bool test_only) {548guarantee(!test_only, "Emitter not fit for test_only instruction variant.");549// This version does not depend on src being zero-extended int2long.550nRotate &= 0x003f; // For risbg, pretend it's an unsigned value.551z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.552}553554// Rotate src, then or selected range from rotated src into dst.555// Set condition code only if so requested. Otherwise it is unpredictable.556// See performance note in macroAssembler_s390.hpp for important information.557void MacroAssembler::rotate_then_or(Register dst, Register src, int lBitPos, int rBitPos,558int nRotate, bool test_only) {559guarantee(!test_only, "Emitter not fit for test_only instruction variant.");560// This version does not depend on src being zero-extended int2long.561nRotate &= 0x003f; // For risbg, pretend it's an unsigned value.562z_rosbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.563}564565// Rotate src, then xor selected range from rotated src into dst.566// Set condition code only if so requested. Otherwise it is unpredictable.567// See performance note in macroAssembler_s390.hpp for important information.568void MacroAssembler::rotate_then_xor(Register dst, Register src, int lBitPos, int rBitPos,569int nRotate, bool test_only) {570guarantee(!test_only, "Emitter not fit for test_only instruction variant.");571// This version does not depend on src being zero-extended int2long.572nRotate &= 0x003f; // For risbg, pretend it's an unsigned value.573z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.574}575576void MacroAssembler::add64(Register r1, RegisterOrConstant inc) {577if (inc.is_register()) {578z_agr(r1, inc.as_register());579} else { // constant580intptr_t imm = inc.as_constant();581add2reg(r1, imm);582}583}584// Helper function to multiply the 64bit contents of a register by a 16bit constant.585// The optimization tries to avoid the mghi instruction, since it uses the FPU for586// calculation and is thus rather slow.587//588// There is no handling for special cases, e.g. cval==0 or cval==1.589//590// Returns len of generated code block.591unsigned int MacroAssembler::mul_reg64_const16(Register rval, Register work, int cval) {592int block_start = offset();593594bool sign_flip = cval < 0;595cval = sign_flip ? -cval : cval;596597BLOCK_COMMENT("Reg64*Con16 {");598599int bit1 = cval & -cval;600if (bit1 == cval) {601z_sllg(rval, rval, exact_log2(bit1));602if (sign_flip) { z_lcgr(rval, rval); }603} else {604int bit2 = (cval-bit1) & -(cval-bit1);605if ((bit1+bit2) == cval) {606z_sllg(work, rval, exact_log2(bit1));607z_sllg(rval, rval, exact_log2(bit2));608z_agr(rval, work);609if (sign_flip) { z_lcgr(rval, rval); }610} else {611if (sign_flip) { z_mghi(rval, -cval); }612else { z_mghi(rval, cval); }613}614}615BLOCK_COMMENT("} Reg64*Con16");616617int block_end = offset();618return block_end - block_start;619}620621// Generic operation r1 := r2 + imm.622//623// Should produce the best code for each supported CPU version.624// r2 == noreg yields r1 := r1 + imm625// imm == 0 emits either no instruction or r1 := r2 !626// NOTES: 1) Don't use this function where fixed sized627// instruction sequences are required!!!628// 2) Don't use this function if condition code629// setting is required!630// 3) Despite being declared as int64_t, the parameter imm631// must be a simm_32 value (= signed 32-bit integer).632void MacroAssembler::add2reg(Register r1, int64_t imm, Register r2) {633assert(Immediate::is_simm32(imm), "probably an implicit conversion went wrong");634635if (r2 == noreg) { r2 = r1; }636637// Handle special case imm == 0.638if (imm == 0) {639lgr_if_needed(r1, r2);640// Nothing else to do.641return;642}643644if (!PreferLAoverADD || (r2 == Z_R0)) {645bool distinctOpnds = VM_Version::has_DistinctOpnds();646647// Can we encode imm in 16 bits signed?648if (Immediate::is_simm16(imm)) {649if (r1 == r2) {650z_aghi(r1, imm);651return;652}653if (distinctOpnds) {654z_aghik(r1, r2, imm);655return;656}657z_lgr(r1, r2);658z_aghi(r1, imm);659return;660}661} else {662// Can we encode imm in 12 bits unsigned?663if (Displacement::is_shortDisp(imm)) {664z_la(r1, imm, r2);665return;666}667// Can we encode imm in 20 bits signed?668if (Displacement::is_validDisp(imm)) {669// Always use LAY instruction, so we don't need the tmp register.670z_lay(r1, imm, r2);671return;672}673674}675676// Can handle it (all possible values) with long immediates.677lgr_if_needed(r1, r2);678z_agfi(r1, imm);679}680681// Generic operation r := b + x + d682//683// Addition of several operands with address generation semantics - sort of:684// - no restriction on the registers. Any register will do for any operand.685// - x == noreg: operand will be disregarded.686// - b == noreg: will use (contents of) result reg as operand (r := r + d).687// - x == Z_R0: just disregard688// - b == Z_R0: use as operand. This is not address generation semantics!!!689//690// The same restrictions as on add2reg() are valid!!!691void MacroAssembler::add2reg_with_index(Register r, int64_t d, Register x, Register b) {692assert(Immediate::is_simm32(d), "probably an implicit conversion went wrong");693694if (x == noreg) { x = Z_R0; }695if (b == noreg) { b = r; }696697// Handle special case x == R0.698if (x == Z_R0) {699// Can simply add the immediate value to the base register.700add2reg(r, d, b);701return;702}703704if (!PreferLAoverADD || (b == Z_R0)) {705bool distinctOpnds = VM_Version::has_DistinctOpnds();706// Handle special case d == 0.707if (d == 0) {708if (b == x) { z_sllg(r, b, 1); return; }709if (r == x) { z_agr(r, b); return; }710if (r == b) { z_agr(r, x); return; }711if (distinctOpnds) { z_agrk(r, x, b); return; }712z_lgr(r, b);713z_agr(r, x);714} else {715if (x == b) { z_sllg(r, x, 1); }716else if (r == x) { z_agr(r, b); }717else if (r == b) { z_agr(r, x); }718else if (distinctOpnds) { z_agrk(r, x, b); }719else {720z_lgr(r, b);721z_agr(r, x);722}723add2reg(r, d);724}725} else {726// Can we encode imm in 12 bits unsigned?727if (Displacement::is_shortDisp(d)) {728z_la(r, d, x, b);729return;730}731// Can we encode imm in 20 bits signed?732if (Displacement::is_validDisp(d)) {733z_lay(r, d, x, b);734return;735}736z_la(r, 0, x, b);737add2reg(r, d);738}739}740741// Generic emitter (32bit) for direct memory increment.742// For optimal code, do not specify Z_R0 as temp register.743void MacroAssembler::add2mem_32(const Address &a, int64_t imm, Register tmp) {744if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {745z_asi(a, imm);746} else {747z_lgf(tmp, a);748add2reg(tmp, imm);749z_st(tmp, a);750}751}752753void MacroAssembler::add2mem_64(const Address &a, int64_t imm, Register tmp) {754if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {755z_agsi(a, imm);756} else {757z_lg(tmp, a);758add2reg(tmp, imm);759z_stg(tmp, a);760}761}762763void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {764switch (size_in_bytes) {765case 8: z_lg(dst, src); break;766case 4: is_signed ? z_lgf(dst, src) : z_llgf(dst, src); break;767case 2: is_signed ? z_lgh(dst, src) : z_llgh(dst, src); break;768case 1: is_signed ? z_lgb(dst, src) : z_llgc(dst, src); break;769default: ShouldNotReachHere();770}771}772773void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {774switch (size_in_bytes) {775case 8: z_stg(src, dst); break;776case 4: z_st(src, dst); break;777case 2: z_sth(src, dst); break;778case 1: z_stc(src, dst); break;779default: ShouldNotReachHere();780}781}782783// Split a si20 offset (20bit, signed) into an ui12 offset (12bit, unsigned) and784// a high-order summand in register tmp.785//786// return value: < 0: No split required, si20 actually has property uimm12.787// >= 0: Split performed. Use return value as uimm12 displacement and788// tmp as index register.789int MacroAssembler::split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate) {790assert(Immediate::is_simm20(si20_offset), "sanity");791int lg_off = (int)si20_offset & 0x0fff; // Punch out low-order 12 bits, always positive.792int ll_off = (int)si20_offset & ~0x0fff; // Force low-order 12 bits to zero.793assert((Displacement::is_shortDisp(si20_offset) && (ll_off == 0)) ||794!Displacement::is_shortDisp(si20_offset), "unexpected offset values");795assert((lg_off+ll_off) == si20_offset, "offset splitup error");796797Register work = accumulate? Z_R0 : tmp;798799if (fixed_codelen) { // Len of code = 10 = 4 + 6.800z_lghi(work, ll_off>>12); // Implicit sign extension.801z_slag(work, work, 12);802} else { // Len of code = 0..10.803if (ll_off == 0) { return -1; }804// ll_off has 8 significant bits (at most) plus sign.805if ((ll_off & 0x0000f000) == 0) { // Non-zero bits only in upper halfbyte.806z_llilh(work, ll_off >> 16);807if (ll_off < 0) { // Sign-extension required.808z_lgfr(work, work);809}810} else {811if ((ll_off & 0x000f0000) == 0) { // Non-zero bits only in lower halfbyte.812z_llill(work, ll_off);813} else { // Non-zero bits in both halfbytes.814z_lghi(work, ll_off>>12); // Implicit sign extension.815z_slag(work, work, 12);816}817}818}819if (accumulate) { z_algr(tmp, work); } // len of code += 4820return lg_off;821}822823void MacroAssembler::load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {824if (Displacement::is_validDisp(si20)) {825z_ley(t, si20, a);826} else {827// Fixed_codelen = true is a simple way to ensure that the size of load_float_largeoffset828// does not depend on si20 (scratch buffer emit size == code buffer emit size for constant829// pool loads).830bool accumulate = true;831bool fixed_codelen = true;832Register work;833834if (fixed_codelen) {835z_lgr(tmp, a); // Lgr_if_needed not applicable due to fixed_codelen.836} else {837accumulate = (a == tmp);838}839work = tmp;840841int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);842if (disp12 < 0) {843z_le(t, si20, work);844} else {845if (accumulate) {846z_le(t, disp12, work);847} else {848z_le(t, disp12, work, a);849}850}851}852}853854void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {855if (Displacement::is_validDisp(si20)) {856z_ldy(t, si20, a);857} else {858// Fixed_codelen = true is a simple way to ensure that the size of load_double_largeoffset859// does not depend on si20 (scratch buffer emit size == code buffer emit size for constant860// pool loads).861bool accumulate = true;862bool fixed_codelen = true;863Register work;864865if (fixed_codelen) {866z_lgr(tmp, a); // Lgr_if_needed not applicable due to fixed_codelen.867} else {868accumulate = (a == tmp);869}870work = tmp;871872int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);873if (disp12 < 0) {874z_ld(t, si20, work);875} else {876if (accumulate) {877z_ld(t, disp12, work);878} else {879z_ld(t, disp12, work, a);880}881}882}883}884885// PCrelative TOC access.886// Returns distance (in bytes) from current position to start of consts section.887// Returns 0 (zero) if no consts section exists or if it has size zero.888long MacroAssembler::toc_distance() {889CodeSection* cs = code()->consts();890return (long)((cs != NULL) ? cs->start()-pc() : 0);891}892893// Implementation on x86/sparc assumes that constant and instruction section are894// adjacent, but this doesn't hold. Two special situations may occur, that we must895// be able to handle:896// 1. const section may be located apart from the inst section.897// 2. const section may be empty898// In both cases, we use the const section's start address to compute the "TOC",899// this seems to occur only temporarily; in the final step we always seem to end up900// with the pc-relatice variant.901//902// PC-relative offset could be +/-2**32 -> use long for disp903// Furthermore: makes no sense to have special code for904// adjacent const and inst sections.905void MacroAssembler::load_toc(Register Rtoc) {906// Simply use distance from start of const section (should be patched in the end).907long disp = toc_distance();908909RelocationHolder rspec = internal_word_Relocation::spec(pc() + disp);910relocate(rspec);911z_larl(Rtoc, RelAddr::pcrel_off32(disp)); // Offset is in halfwords.912}913914// PCrelative TOC access.915// Load from anywhere pcrelative (with relocation of load instr)916void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) {917address pc = this->pc();918ptrdiff_t total_distance = dataLocation - pc;919RelocationHolder rspec = internal_word_Relocation::spec(dataLocation);920921assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");922assert(total_distance != 0, "sanity");923924// Some extra safety net.925if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {926guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_long_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);927}928929(this)->relocate(rspec, relocInfo::pcrel_addr_format);930z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));931}932933934// PCrelative TOC access.935// Load from anywhere pcrelative (with relocation of load instr)936// loaded addr has to be relocated when added to constant pool.937void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) {938address pc = this->pc();939ptrdiff_t total_distance = addrLocation - pc;940RelocationHolder rspec = internal_word_Relocation::spec(addrLocation);941942assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");943944// Some extra safety net.945if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {946guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_long_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);947}948949(this)->relocate(rspec, relocInfo::pcrel_addr_format);950z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));951}952953// Generic operation: load a value from memory and test.954// CondCode indicates the sign (<0, ==0, >0) of the loaded value.955void MacroAssembler::load_and_test_byte(Register dst, const Address &a) {956z_lb(dst, a);957z_ltr(dst, dst);958}959960void MacroAssembler::load_and_test_short(Register dst, const Address &a) {961int64_t disp = a.disp20();962if (Displacement::is_shortDisp(disp)) {963z_lh(dst, a);964} else if (Displacement::is_longDisp(disp)) {965z_lhy(dst, a);966} else {967guarantee(false, "displacement out of range");968}969z_ltr(dst, dst);970}971972void MacroAssembler::load_and_test_int(Register dst, const Address &a) {973z_lt(dst, a);974}975976void MacroAssembler::load_and_test_int2long(Register dst, const Address &a) {977z_ltgf(dst, a);978}979980void MacroAssembler::load_and_test_long(Register dst, const Address &a) {981z_ltg(dst, a);982}983984// Test a bit in memory.985void MacroAssembler::testbit(const Address &a, unsigned int bit) {986assert(a.index() == noreg, "no index reg allowed in testbit");987if (bit <= 7) {988z_tm(a.disp() + 3, a.base(), 1 << bit);989} else if (bit <= 15) {990z_tm(a.disp() + 2, a.base(), 1 << (bit - 8));991} else if (bit <= 23) {992z_tm(a.disp() + 1, a.base(), 1 << (bit - 16));993} else if (bit <= 31) {994z_tm(a.disp() + 0, a.base(), 1 << (bit - 24));995} else {996ShouldNotReachHere();997}998}9991000// Test a bit in a register. Result is reflected in CC.1001void MacroAssembler::testbit(Register r, unsigned int bitPos) {1002if (bitPos < 16) {1003z_tmll(r, 1U<<bitPos);1004} else if (bitPos < 32) {1005z_tmlh(r, 1U<<(bitPos-16));1006} else if (bitPos < 48) {1007z_tmhl(r, 1U<<(bitPos-32));1008} else if (bitPos < 64) {1009z_tmhh(r, 1U<<(bitPos-48));1010} else {1011ShouldNotReachHere();1012}1013}10141015void MacroAssembler::prefetch_read(Address a) {1016z_pfd(1, a.disp20(), a.indexOrR0(), a.base());1017}1018void MacroAssembler::prefetch_update(Address a) {1019z_pfd(2, a.disp20(), a.indexOrR0(), a.base());1020}10211022// Clear a register, i.e. load const zero into reg.1023// Return len (in bytes) of generated instruction(s).1024// whole_reg: Clear 64 bits if true, 32 bits otherwise.1025// set_cc: Use instruction that sets the condition code, if true.1026int MacroAssembler::clear_reg(Register r, bool whole_reg, bool set_cc) {1027unsigned int start_off = offset();1028if (whole_reg) {1029set_cc ? z_xgr(r, r) : z_laz(r, 0, Z_R0);1030} else { // Only 32bit register.1031set_cc ? z_xr(r, r) : z_lhi(r, 0);1032}1033return offset() - start_off;1034}10351036#ifdef ASSERT1037int MacroAssembler::preset_reg(Register r, unsigned long pattern, int pattern_len) {1038switch (pattern_len) {1039case 1:1040pattern = (pattern & 0x000000ff) | ((pattern & 0x000000ff)<<8);1041case 2:1042pattern = (pattern & 0x0000ffff) | ((pattern & 0x0000ffff)<<16);1043case 4:1044pattern = (pattern & 0xffffffffL) | ((pattern & 0xffffffffL)<<32);1045case 8:1046return load_const_optimized_rtn_len(r, pattern, true);1047break;1048default:1049guarantee(false, "preset_reg: bad len");1050}1051return 0;1052}1053#endif10541055// addr: Address descriptor of memory to clear index register will not be used !1056// size: Number of bytes to clear.1057// !!! DO NOT USE THEM FOR ATOMIC MEMORY CLEARING !!!1058// !!! Use store_const() instead !!!1059void MacroAssembler::clear_mem(const Address& addr, unsigned size) {1060guarantee(size <= 256, "MacroAssembler::clear_mem: size too large");10611062if (size == 1) {1063z_mvi(addr, 0);1064return;1065}10661067switch (size) {1068case 2: z_mvhhi(addr, 0);1069return;1070case 4: z_mvhi(addr, 0);1071return;1072case 8: z_mvghi(addr, 0);1073return;1074default: ; // Fallthru to xc.1075}10761077z_xc(addr, size, addr);1078}10791080void MacroAssembler::align(int modulus) {1081while (offset() % modulus != 0) z_nop();1082}10831084// Special version for non-relocateable code if required alignment1085// is larger than CodeEntryAlignment.1086void MacroAssembler::align_address(int modulus) {1087while ((uintptr_t)pc() % modulus != 0) z_nop();1088}10891090Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,1091Register temp_reg,1092int64_t extra_slot_offset) {1093// On Z, we can have index and disp in an Address. So don't call argument_offset,1094// which issues an unnecessary add instruction.1095int stackElementSize = Interpreter::stackElementSize;1096int64_t offset = extra_slot_offset * stackElementSize;1097const Register argbase = Z_esp;1098if (arg_slot.is_constant()) {1099offset += arg_slot.as_constant() * stackElementSize;1100return Address(argbase, offset);1101}1102// else1103assert(temp_reg != noreg, "must specify");1104assert(temp_reg != Z_ARG1, "base and index are conflicting");1105z_sllg(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); // tempreg = arg_slot << 31106return Address(argbase, temp_reg, offset);1107}110811091110//===================================================================1111//=== START C O N S T A N T S I N C O D E S T R E A M ===1112//===================================================================1113//=== P A T CH A B L E C O N S T A N T S ===1114//===================================================================111511161117//---------------------------------------------------1118// Load (patchable) constant into register1119//---------------------------------------------------112011211122// Load absolute address (and try to optimize).1123// Note: This method is usable only for position-fixed code,1124// referring to a position-fixed target location.1125// If not so, relocations and patching must be used.1126void MacroAssembler::load_absolute_address(Register d, address addr) {1127assert(addr != NULL, "should not happen");1128BLOCK_COMMENT("load_absolute_address:");1129if (addr == NULL) {1130z_larl(d, pc()); // Dummy emit for size calc.1131return;1132}11331134if (RelAddr::is_in_range_of_RelAddr32(addr, pc())) {1135z_larl(d, addr);1136return;1137}11381139load_const_optimized(d, (long)addr);1140}11411142// Load a 64bit constant.1143// Patchable code sequence, but not atomically patchable.1144// Make sure to keep code size constant -> no value-dependent optimizations.1145// Do not kill condition code.1146void MacroAssembler::load_const(Register t, long x) {1147// Note: Right shift is only cleanly defined for unsigned types1148// or for signed types with nonnegative values.1149Assembler::z_iihf(t, (long)((unsigned long)x >> 32));1150Assembler::z_iilf(t, (long)((unsigned long)x & 0xffffffffUL));1151}11521153// Load a 32bit constant into a 64bit register, sign-extend or zero-extend.1154// Patchable code sequence, but not atomically patchable.1155// Make sure to keep code size constant -> no value-dependent optimizations.1156// Do not kill condition code.1157void MacroAssembler::load_const_32to64(Register t, int64_t x, bool sign_extend) {1158if (sign_extend) { Assembler::z_lgfi(t, x); }1159else { Assembler::z_llilf(t, x); }1160}11611162// Load narrow oop constant, no decompression.1163void MacroAssembler::load_narrow_oop(Register t, narrowOop a) {1164assert(UseCompressedOops, "must be on to call this method");1165load_const_32to64(t, CompressedOops::narrow_oop_value(a), false /*sign_extend*/);1166}11671168// Load narrow klass constant, compression required.1169void MacroAssembler::load_narrow_klass(Register t, Klass* k) {1170assert(UseCompressedClassPointers, "must be on to call this method");1171narrowKlass encoded_k = CompressedKlassPointers::encode(k);1172load_const_32to64(t, encoded_k, false /*sign_extend*/);1173}11741175//------------------------------------------------------1176// Compare (patchable) constant with register.1177//------------------------------------------------------11781179// Compare narrow oop in reg with narrow oop constant, no decompression.1180void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) {1181assert(UseCompressedOops, "must be on to call this method");11821183Assembler::z_clfi(oop1, CompressedOops::narrow_oop_value(oop2));1184}11851186// Compare narrow oop in reg with narrow oop constant, no decompression.1187void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) {1188assert(UseCompressedClassPointers, "must be on to call this method");1189narrowKlass encoded_k = CompressedKlassPointers::encode(klass2);11901191Assembler::z_clfi(klass1, encoded_k);1192}11931194//----------------------------------------------------------1195// Check which kind of load_constant we have here.1196//----------------------------------------------------------11971198// Detection of CPU version dependent load_const sequence.1199// The detection is valid only for code sequences generated by load_const,1200// not load_const_optimized.1201bool MacroAssembler::is_load_const(address a) {1202unsigned long inst1, inst2;1203unsigned int len1, len2;12041205len1 = get_instruction(a, &inst1);1206len2 = get_instruction(a + len1, &inst2);12071208return is_z_iihf(inst1) && is_z_iilf(inst2);1209}12101211// Detection of CPU version dependent load_const_32to64 sequence.1212// Mostly used for narrow oops and narrow Klass pointers.1213// The detection is valid only for code sequences generated by load_const_32to64.1214bool MacroAssembler::is_load_const_32to64(address pos) {1215unsigned long inst1, inst2;1216unsigned int len1;12171218len1 = get_instruction(pos, &inst1);1219return is_z_llilf(inst1);1220}12211222// Detection of compare_immediate_narrow sequence.1223// The detection is valid only for code sequences generated by compare_immediate_narrow_oop.1224bool MacroAssembler::is_compare_immediate32(address pos) {1225return is_equal(pos, CLFI_ZOPC, RIL_MASK);1226}12271228// Detection of compare_immediate_narrow sequence.1229// The detection is valid only for code sequences generated by compare_immediate_narrow_oop.1230bool MacroAssembler::is_compare_immediate_narrow_oop(address pos) {1231return is_compare_immediate32(pos);1232}12331234// Detection of compare_immediate_narrow sequence.1235// The detection is valid only for code sequences generated by compare_immediate_narrow_klass.1236bool MacroAssembler::is_compare_immediate_narrow_klass(address pos) {1237return is_compare_immediate32(pos);1238}12391240//-----------------------------------1241// patch the load_constant1242//-----------------------------------12431244// CPU-version dependend patching of load_const.1245void MacroAssembler::patch_const(address a, long x) {1246assert(is_load_const(a), "not a load of a constant");1247// Note: Right shift is only cleanly defined for unsigned types1248// or for signed types with nonnegative values.1249set_imm32((address)a, (long)((unsigned long)x >> 32));1250set_imm32((address)(a + 6), (long)((unsigned long)x & 0xffffffffUL));1251}12521253// Patching the value of CPU version dependent load_const_32to64 sequence.1254// The passed ptr MUST be in compressed format!1255int MacroAssembler::patch_load_const_32to64(address pos, int64_t np) {1256assert(is_load_const_32to64(pos), "not a load of a narrow ptr (oop or klass)");12571258set_imm32(pos, np);1259return 6;1260}12611262// Patching the value of CPU version dependent compare_immediate_narrow sequence.1263// The passed ptr MUST be in compressed format!1264int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) {1265assert(is_compare_immediate32(pos), "not a compressed ptr compare");12661267set_imm32(pos, np);1268return 6;1269}12701271// Patching the immediate value of CPU version dependent load_narrow_oop sequence.1272// The passed ptr must NOT be in compressed format!1273int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {1274assert(UseCompressedOops, "Can only patch compressed oops");1275return patch_load_const_32to64(pos, CompressedOops::narrow_oop_value(o));1276}12771278// Patching the immediate value of CPU version dependent load_narrow_klass sequence.1279// The passed ptr must NOT be in compressed format!1280int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {1281assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");12821283narrowKlass nk = CompressedKlassPointers::encode(k);1284return patch_load_const_32to64(pos, nk);1285}12861287// Patching the immediate value of CPU version dependent compare_immediate_narrow_oop sequence.1288// The passed ptr must NOT be in compressed format!1289int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {1290assert(UseCompressedOops, "Can only patch compressed oops");1291return patch_compare_immediate_32(pos, CompressedOops::narrow_oop_value(o));1292}12931294// Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence.1295// The passed ptr must NOT be in compressed format!1296int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) {1297assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");12981299narrowKlass nk = CompressedKlassPointers::encode(k);1300return patch_compare_immediate_32(pos, nk);1301}13021303//------------------------------------------------------------------------1304// Extract the constant from a load_constant instruction stream.1305//------------------------------------------------------------------------13061307// Get constant from a load_const sequence.1308long MacroAssembler::get_const(address a) {1309assert(is_load_const(a), "not a load of a constant");1310unsigned long x;1311x = (((unsigned long) (get_imm32(a,0) & 0xffffffff)) << 32);1312x |= (((unsigned long) (get_imm32(a,1) & 0xffffffff)));1313return (long) x;1314}13151316//--------------------------------------1317// Store a constant in memory.1318//--------------------------------------13191320// General emitter to move a constant to memory.1321// The store is atomic.1322// o Address must be given in RS format (no index register)1323// o Displacement should be 12bit unsigned for efficiency. 20bit signed also supported.1324// o Constant can be 1, 2, 4, or 8 bytes, signed or unsigned.1325// o Memory slot can be 1, 2, 4, or 8 bytes, signed or unsigned.1326// o Memory slot must be at least as wide as constant, will assert otherwise.1327// o Signed constants will sign-extend, unsigned constants will zero-extend to slot width.1328int MacroAssembler::store_const(const Address &dest, long imm,1329unsigned int lm, unsigned int lc,1330Register scratch) {1331int64_t disp = dest.disp();1332Register base = dest.base();1333assert(!dest.has_index(), "not supported");1334assert((lm==1)||(lm==2)||(lm==4)||(lm==8), "memory length not supported");1335assert((lc==1)||(lc==2)||(lc==4)||(lc==8), "constant length not supported");1336assert(lm>=lc, "memory slot too small");1337assert(lc==8 || Immediate::is_simm(imm, lc*8), "const out of range");1338assert(Displacement::is_validDisp(disp), "displacement out of range");13391340bool is_shortDisp = Displacement::is_shortDisp(disp);1341int store_offset = -1;13421343// For target len == 1 it's easy.1344if (lm == 1) {1345store_offset = offset();1346if (is_shortDisp) {1347z_mvi(disp, base, imm);1348return store_offset;1349} else {1350z_mviy(disp, base, imm);1351return store_offset;1352}1353}13541355// All the "good stuff" takes an unsigned displacement.1356if (is_shortDisp) {1357// NOTE: Cannot use clear_mem for imm==0, because it is not atomic.13581359store_offset = offset();1360switch (lm) {1361case 2: // Lc == 1 handled correctly here, even for unsigned. Instruction does no widening.1362z_mvhhi(disp, base, imm);1363return store_offset;1364case 4:1365if (Immediate::is_simm16(imm)) {1366z_mvhi(disp, base, imm);1367return store_offset;1368}1369break;1370case 8:1371if (Immediate::is_simm16(imm)) {1372z_mvghi(disp, base, imm);1373return store_offset;1374}1375break;1376default:1377ShouldNotReachHere();1378break;1379}1380}13811382// Can't optimize, so load value and store it.1383guarantee(scratch != noreg, " need a scratch register here !");1384if (imm != 0) {1385load_const_optimized(scratch, imm); // Preserves CC anyway.1386} else {1387// Leave CC alone!!1388(void) clear_reg(scratch, true, false); // Indicate unused result.1389}13901391store_offset = offset();1392if (is_shortDisp) {1393switch (lm) {1394case 2:1395z_sth(scratch, disp, Z_R0, base);1396return store_offset;1397case 4:1398z_st(scratch, disp, Z_R0, base);1399return store_offset;1400case 8:1401z_stg(scratch, disp, Z_R0, base);1402return store_offset;1403default:1404ShouldNotReachHere();1405break;1406}1407} else {1408switch (lm) {1409case 2:1410z_sthy(scratch, disp, Z_R0, base);1411return store_offset;1412case 4:1413z_sty(scratch, disp, Z_R0, base);1414return store_offset;1415case 8:1416z_stg(scratch, disp, Z_R0, base);1417return store_offset;1418default:1419ShouldNotReachHere();1420break;1421}1422}1423return -1; // should not reach here1424}14251426//===================================================================1427//=== N O T P A T CH A B L E C O N S T A N T S ===1428//===================================================================14291430// Load constant x into register t with a fast instrcution sequence1431// depending on the bits in x. Preserves CC under all circumstances.1432int MacroAssembler::load_const_optimized_rtn_len(Register t, long x, bool emit) {1433if (x == 0) {1434int len;1435if (emit) {1436len = clear_reg(t, true, false);1437} else {1438len = 4;1439}1440return len;1441}14421443if (Immediate::is_simm16(x)) {1444if (emit) { z_lghi(t, x); }1445return 4;1446}14471448// 64 bit value: | part1 | part2 | part3 | part4 |1449// At least one part is not zero!1450// Note: Right shift is only cleanly defined for unsigned types1451// or for signed types with nonnegative values.1452int part1 = (int)((unsigned long)x >> 48) & 0x0000ffff;1453int part2 = (int)((unsigned long)x >> 32) & 0x0000ffff;1454int part3 = (int)((unsigned long)x >> 16) & 0x0000ffff;1455int part4 = (int)x & 0x0000ffff;1456int part12 = (int)((unsigned long)x >> 32);1457int part34 = (int)x;14581459// Lower word only (unsigned).1460if (part12 == 0) {1461if (part3 == 0) {1462if (emit) z_llill(t, part4);1463return 4;1464}1465if (part4 == 0) {1466if (emit) z_llilh(t, part3);1467return 4;1468}1469if (emit) z_llilf(t, part34);1470return 6;1471}14721473// Upper word only.1474if (part34 == 0) {1475if (part1 == 0) {1476if (emit) z_llihl(t, part2);1477return 4;1478}1479if (part2 == 0) {1480if (emit) z_llihh(t, part1);1481return 4;1482}1483if (emit) z_llihf(t, part12);1484return 6;1485}14861487// Lower word only (signed).1488if ((part1 == 0x0000ffff) && (part2 == 0x0000ffff) && ((part3 & 0x00008000) != 0)) {1489if (emit) z_lgfi(t, part34);1490return 6;1491}14921493int len = 0;14941495if ((part1 == 0) || (part2 == 0)) {1496if (part1 == 0) {1497if (emit) z_llihl(t, part2);1498len += 4;1499} else {1500if (emit) z_llihh(t, part1);1501len += 4;1502}1503} else {1504if (emit) z_llihf(t, part12);1505len += 6;1506}15071508if ((part3 == 0) || (part4 == 0)) {1509if (part3 == 0) {1510if (emit) z_iill(t, part4);1511len += 4;1512} else {1513if (emit) z_iilh(t, part3);1514len += 4;1515}1516} else {1517if (emit) z_iilf(t, part34);1518len += 6;1519}1520return len;1521}15221523//=====================================================================1524//=== H I G H E R L E V E L B R A N C H E M I T T E R S ===1525//=====================================================================15261527// Note: In the worst case, one of the scratch registers is destroyed!!!1528void MacroAssembler::compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {1529// Right operand is constant.1530if (x2.is_constant()) {1531jlong value = x2.as_constant();1532compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/true);1533return;1534}15351536// Right operand is in register.1537compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/true);1538}15391540// Note: In the worst case, one of the scratch registers is destroyed!!!1541void MacroAssembler::compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {1542// Right operand is constant.1543if (x2.is_constant()) {1544jlong value = x2.as_constant();1545compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/false);1546return;1547}15481549// Right operand is in register.1550compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/false);1551}15521553// Note: In the worst case, one of the scratch registers is destroyed!!!1554void MacroAssembler::compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {1555// Right operand is constant.1556if (x2.is_constant()) {1557jlong value = x2.as_constant();1558compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/true);1559return;1560}15611562// Right operand is in register.1563compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/true);1564}15651566void MacroAssembler::compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {1567// Right operand is constant.1568if (x2.is_constant()) {1569jlong value = x2.as_constant();1570compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/false);1571return;1572}15731574// Right operand is in register.1575compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/false);1576}15771578// Generate an optimal branch to the branch target.1579// Optimal means that a relative branch (brc or brcl) is used if the1580// branch distance is short enough. Loading the target address into a1581// register and branching via reg is used as fallback only.1582//1583// Used registers:1584// Z_R1 - work reg. Holds branch target address.1585// Used in fallback case only.1586//1587// This version of branch_optimized is good for cases where the target address is known1588// and constant, i.e. is never changed (no relocation, no patching).1589void MacroAssembler::branch_optimized(Assembler::branch_condition cond, address branch_addr) {1590address branch_origin = pc();15911592if (RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {1593z_brc(cond, branch_addr);1594} else if (RelAddr::is_in_range_of_RelAddr32(branch_addr, branch_origin)) {1595z_brcl(cond, branch_addr);1596} else {1597load_const_optimized(Z_R1, branch_addr); // CC must not get killed by load_const_optimized.1598z_bcr(cond, Z_R1);1599}1600}16011602// This version of branch_optimized is good for cases where the target address1603// is potentially not yet known at the time the code is emitted.1604//1605// One very common case is a branch to an unbound label which is handled here.1606// The caller might know (or hope) that the branch distance is short enough1607// to be encoded in a 16bit relative address. In this case he will pass a1608// NearLabel branch_target.1609// Care must be taken with unbound labels. Each call to target(label) creates1610// an entry in the patch queue for that label to patch all references of the label1611// once it gets bound. Those recorded patch locations must be patchable. Otherwise,1612// an assertion fires at patch time.1613void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& branch_target) {1614if (branch_target.is_bound()) {1615address branch_addr = target(branch_target);1616branch_optimized(cond, branch_addr);1617} else if (branch_target.is_near()) {1618z_brc(cond, branch_target); // Caller assures that the target will be in range for z_brc.1619} else {1620z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.1621}1622}16231624// Generate an optimal compare and branch to the branch target.1625// Optimal means that a relative branch (clgrj, brc or brcl) is used if the1626// branch distance is short enough. Loading the target address into a1627// register and branching via reg is used as fallback only.1628//1629// Input:1630// r1 - left compare operand1631// r2 - right compare operand1632void MacroAssembler::compare_and_branch_optimized(Register r1,1633Register r2,1634Assembler::branch_condition cond,1635address branch_addr,1636bool len64,1637bool has_sign) {1638unsigned int casenum = (len64?2:0)+(has_sign?0:1);16391640address branch_origin = pc();1641if (VM_Version::has_CompareBranch() && RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {1642switch (casenum) {1643case 0: z_crj( r1, r2, cond, branch_addr); break;1644case 1: z_clrj (r1, r2, cond, branch_addr); break;1645case 2: z_cgrj(r1, r2, cond, branch_addr); break;1646case 3: z_clgrj(r1, r2, cond, branch_addr); break;1647default: ShouldNotReachHere(); break;1648}1649} else {1650switch (casenum) {1651case 0: z_cr( r1, r2); break;1652case 1: z_clr(r1, r2); break;1653case 2: z_cgr(r1, r2); break;1654case 3: z_clgr(r1, r2); break;1655default: ShouldNotReachHere(); break;1656}1657branch_optimized(cond, branch_addr);1658}1659}16601661// Generate an optimal compare and branch to the branch target.1662// Optimal means that a relative branch (clgij, brc or brcl) is used if the1663// branch distance is short enough. Loading the target address into a1664// register and branching via reg is used as fallback only.1665//1666// Input:1667// r1 - left compare operand (in register)1668// x2 - right compare operand (immediate)1669void MacroAssembler::compare_and_branch_optimized(Register r1,1670jlong x2,1671Assembler::branch_condition cond,1672Label& branch_target,1673bool len64,1674bool has_sign) {1675address branch_origin = pc();1676bool x2_imm8 = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));1677bool is_RelAddr16 = branch_target.is_near() ||1678(branch_target.is_bound() &&1679RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));1680unsigned int casenum = (len64?2:0)+(has_sign?0:1);16811682if (VM_Version::has_CompareBranch() && is_RelAddr16 && x2_imm8) {1683switch (casenum) {1684case 0: z_cij( r1, x2, cond, branch_target); break;1685case 1: z_clij(r1, x2, cond, branch_target); break;1686case 2: z_cgij(r1, x2, cond, branch_target); break;1687case 3: z_clgij(r1, x2, cond, branch_target); break;1688default: ShouldNotReachHere(); break;1689}1690return;1691}16921693if (x2 == 0) {1694switch (casenum) {1695case 0: z_ltr(r1, r1); break;1696case 1: z_ltr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!1697case 2: z_ltgr(r1, r1); break;1698case 3: z_ltgr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!1699default: ShouldNotReachHere(); break;1700}1701} else {1702if ((has_sign && Immediate::is_simm16(x2)) || (!has_sign && Immediate::is_uimm(x2, 15))) {1703switch (casenum) {1704case 0: z_chi(r1, x2); break;1705case 1: z_chi(r1, x2); break; // positive immediate < 2**151706case 2: z_cghi(r1, x2); break;1707case 3: z_cghi(r1, x2); break; // positive immediate < 2**151708default: break;1709}1710} else if ( (has_sign && Immediate::is_simm32(x2)) || (!has_sign && Immediate::is_uimm32(x2)) ) {1711switch (casenum) {1712case 0: z_cfi( r1, x2); break;1713case 1: z_clfi(r1, x2); break;1714case 2: z_cgfi(r1, x2); break;1715case 3: z_clgfi(r1, x2); break;1716default: ShouldNotReachHere(); break;1717}1718} else {1719// No instruction with immediate operand possible, so load into register.1720Register scratch = (r1 != Z_R0) ? Z_R0 : Z_R1;1721load_const_optimized(scratch, x2);1722switch (casenum) {1723case 0: z_cr( r1, scratch); break;1724case 1: z_clr(r1, scratch); break;1725case 2: z_cgr(r1, scratch); break;1726case 3: z_clgr(r1, scratch); break;1727default: ShouldNotReachHere(); break;1728}1729}1730}1731branch_optimized(cond, branch_target);1732}17331734// Generate an optimal compare and branch to the branch target.1735// Optimal means that a relative branch (clgrj, brc or brcl) is used if the1736// branch distance is short enough. Loading the target address into a1737// register and branching via reg is used as fallback only.1738//1739// Input:1740// r1 - left compare operand1741// r2 - right compare operand1742void MacroAssembler::compare_and_branch_optimized(Register r1,1743Register r2,1744Assembler::branch_condition cond,1745Label& branch_target,1746bool len64,1747bool has_sign) {1748unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);17491750if (branch_target.is_bound()) {1751address branch_addr = target(branch_target);1752compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);1753} else {1754if (VM_Version::has_CompareBranch() && branch_target.is_near()) {1755switch (casenum) {1756case 0: z_crj( r1, r2, cond, branch_target); break;1757case 1: z_clrj( r1, r2, cond, branch_target); break;1758case 2: z_cgrj( r1, r2, cond, branch_target); break;1759case 3: z_clgrj(r1, r2, cond, branch_target); break;1760default: ShouldNotReachHere(); break;1761}1762} else {1763switch (casenum) {1764case 0: z_cr( r1, r2); break;1765case 1: z_clr(r1, r2); break;1766case 2: z_cgr(r1, r2); break;1767case 3: z_clgr(r1, r2); break;1768default: ShouldNotReachHere(); break;1769}1770branch_optimized(cond, branch_target);1771}1772}1773}17741775//===========================================================================1776//=== END H I G H E R L E V E L B R A N C H E M I T T E R S ===1777//===========================================================================17781779AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {1780assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");1781int index = oop_recorder()->allocate_metadata_index(obj);1782RelocationHolder rspec = metadata_Relocation::spec(index);1783return AddressLiteral((address)obj, rspec);1784}17851786AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {1787assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");1788int index = oop_recorder()->find_index(obj);1789RelocationHolder rspec = metadata_Relocation::spec(index);1790return AddressLiteral((address)obj, rspec);1791}17921793AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {1794assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");1795int oop_index = oop_recorder()->allocate_oop_index(obj);1796return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));1797}17981799AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {1800assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");1801int oop_index = oop_recorder()->find_index(obj);1802return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));1803}18041805// NOTE: destroys r1806void MacroAssembler::c2bool(Register r, Register t) {1807z_lcr(t, r); // t = -r1808z_or(r, t); // r = -r OR r1809z_srl(r, 31); // Yields 0 if r was 0, 1 otherwise.1810}18111812// Patch instruction `inst' at offset `inst_pos' to refer to `dest_pos'1813// and return the resulting instruction.1814// Dest_pos and inst_pos are 32 bit only. These parms can only designate1815// relative positions.1816// Use correct argument types. Do not pre-calculate distance.1817unsigned long MacroAssembler::patched_branch(address dest_pos, unsigned long inst, address inst_pos) {1818int c = 0;1819unsigned long patched_inst = 0;1820if (is_call_pcrelative_short(inst) ||1821is_branch_pcrelative_short(inst) ||1822is_branchoncount_pcrelative_short(inst) ||1823is_branchonindex32_pcrelative_short(inst)) {1824c = 1;1825int m = fmask(15, 0); // simm16(-1, 16, 32);1826int v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 32);1827patched_inst = (inst & ~m) | v;1828} else if (is_compareandbranch_pcrelative_short(inst)) {1829c = 2;1830long m = fmask(31, 16); // simm16(-1, 16, 48);1831long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);1832patched_inst = (inst & ~m) | v;1833} else if (is_branchonindex64_pcrelative_short(inst)) {1834c = 3;1835long m = fmask(31, 16); // simm16(-1, 16, 48);1836long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);1837patched_inst = (inst & ~m) | v;1838} else if (is_call_pcrelative_long(inst) || is_branch_pcrelative_long(inst)) {1839c = 4;1840long m = fmask(31, 0); // simm32(-1, 16, 48);1841long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);1842patched_inst = (inst & ~m) | v;1843} else if (is_pcrelative_long(inst)) { // These are the non-branch pc-relative instructions.1844c = 5;1845long m = fmask(31, 0); // simm32(-1, 16, 48);1846long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);1847patched_inst = (inst & ~m) | v;1848} else {1849print_dbg_msg(tty, inst, "not a relative branch", 0);1850dump_code_range(tty, inst_pos, 32, "not a pcrelative branch");1851ShouldNotReachHere();1852}18531854long new_off = get_pcrel_offset(patched_inst);1855if (new_off != (dest_pos-inst_pos)) {1856tty->print_cr("case %d: dest_pos = %p, inst_pos = %p, disp = %ld(%12.12lx)", c, dest_pos, inst_pos, new_off, new_off);1857print_dbg_msg(tty, inst, "<- original instruction: branch patching error", 0);1858print_dbg_msg(tty, patched_inst, "<- patched instruction: branch patching error", 0);1859#ifdef LUCY_DBG1860VM_Version::z_SIGSEGV();1861#endif1862ShouldNotReachHere();1863}1864return patched_inst;1865}18661867// Only called when binding labels (share/vm/asm/assembler.cpp)1868// Pass arguments as intended. Do not pre-calculate distance.1869void MacroAssembler::pd_patch_instruction(address branch, address target, const char* file, int line) {1870unsigned long stub_inst;1871int inst_len = get_instruction(branch, &stub_inst);18721873set_instruction(branch, patched_branch(target, stub_inst, branch), inst_len);1874}187518761877// Extract relative address (aka offset).1878// inv_simm16 works for 4-byte instructions only.1879// compare and branch instructions are 6-byte and have a 16bit offset "in the middle".1880long MacroAssembler::get_pcrel_offset(unsigned long inst) {18811882if (MacroAssembler::is_pcrelative_short(inst)) {1883if (((inst&0xFFFFffff00000000UL) == 0) && ((inst&0x00000000FFFF0000UL) != 0)) {1884return RelAddr::inv_pcrel_off16(inv_simm16(inst));1885} else {1886return RelAddr::inv_pcrel_off16(inv_simm16_48(inst));1887}1888}18891890if (MacroAssembler::is_pcrelative_long(inst)) {1891return RelAddr::inv_pcrel_off32(inv_simm32(inst));1892}18931894print_dbg_msg(tty, inst, "not a pcrelative instruction", 6);1895#ifdef LUCY_DBG1896VM_Version::z_SIGSEGV();1897#else1898ShouldNotReachHere();1899#endif1900return -1;1901}19021903long MacroAssembler::get_pcrel_offset(address pc) {1904unsigned long inst;1905unsigned int len = get_instruction(pc, &inst);19061907#ifdef ASSERT1908long offset;1909if (MacroAssembler::is_pcrelative_short(inst) || MacroAssembler::is_pcrelative_long(inst)) {1910offset = get_pcrel_offset(inst);1911} else {1912offset = -1;1913}19141915if (offset == -1) {1916dump_code_range(tty, pc, 32, "not a pcrelative instruction");1917#ifdef LUCY_DBG1918VM_Version::z_SIGSEGV();1919#else1920ShouldNotReachHere();1921#endif1922}1923return offset;1924#else1925return get_pcrel_offset(inst);1926#endif // ASSERT1927}19281929// Get target address from pc-relative instructions.1930address MacroAssembler::get_target_addr_pcrel(address pc) {1931assert(is_pcrelative_long(pc), "not a pcrelative instruction");1932return pc + get_pcrel_offset(pc);1933}19341935// Patch pc relative load address.1936void MacroAssembler::patch_target_addr_pcrel(address pc, address con) {1937unsigned long inst;1938// Offset is +/- 2**32 -> use long.1939ptrdiff_t distance = con - pc;19401941get_instruction(pc, &inst);19421943if (is_pcrelative_short(inst)) {1944*(short *)(pc+2) = RelAddr::pcrel_off16(con, pc); // Instructions are at least 2-byte aligned, no test required.19451946// Some extra safety net.1947if (!RelAddr::is_in_range_of_RelAddr16(distance)) {1948print_dbg_msg(tty, inst, "distance out of range (16bit)", 4);1949dump_code_range(tty, pc, 32, "distance out of range (16bit)");1950guarantee(RelAddr::is_in_range_of_RelAddr16(distance), "too far away (more than +/- 2**16");1951}1952return;1953}19541955if (is_pcrelative_long(inst)) {1956*(int *)(pc+2) = RelAddr::pcrel_off32(con, pc);19571958// Some Extra safety net.1959if (!RelAddr::is_in_range_of_RelAddr32(distance)) {1960print_dbg_msg(tty, inst, "distance out of range (32bit)", 6);1961dump_code_range(tty, pc, 32, "distance out of range (32bit)");1962guarantee(RelAddr::is_in_range_of_RelAddr32(distance), "too far away (more than +/- 2**32");1963}1964return;1965}19661967guarantee(false, "not a pcrelative instruction to patch!");1968}19691970// "Current PC" here means the address just behind the basr instruction.1971address MacroAssembler::get_PC(Register result) {1972z_basr(result, Z_R0); // Don't branch, just save next instruction address in result.1973return pc();1974}19751976// Get current PC + offset.1977// Offset given in bytes, must be even!1978// "Current PC" here means the address of the larl instruction plus the given offset.1979address MacroAssembler::get_PC(Register result, int64_t offset) {1980address here = pc();1981z_larl(result, offset/2); // Save target instruction address in result.1982return here + offset;1983}19841985void MacroAssembler::instr_size(Register size, Register pc) {1986// Extract 2 most significant bits of current instruction.1987z_llgc(size, Address(pc));1988z_srl(size, 6);1989// Compute (x+3)&6 which translates 0->2, 1->4, 2->4, 3->6.1990z_ahi(size, 3);1991z_nill(size, 6);1992}19931994// Resize_frame with SP(new) = SP(old) - [offset].1995void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp)1996{1997assert_different_registers(offset, fp, Z_SP);1998if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); }19992000z_sgr(Z_SP, offset);2001z_stg(fp, _z_abi(callers_sp), Z_SP);2002}20032004// Resize_frame with SP(new) = [newSP] + offset.2005// This emitter is useful if we already have calculated a pointer2006// into the to-be-allocated stack space, e.g. with special alignment properties,2007// but need some additional space, e.g. for spilling.2008// newSP is the pre-calculated pointer. It must not be modified.2009// fp holds, or is filled with, the frame pointer.2010// offset is the additional increment which is added to addr to form the new SP.2011// Note: specify a negative value to reserve more space!2012// load_fp == true only indicates that fp is not pre-filled with the frame pointer.2013// It does not guarantee that fp contains the frame pointer at the end.2014void MacroAssembler::resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp) {2015assert_different_registers(newSP, fp, Z_SP);20162017if (load_fp) {2018z_lg(fp, _z_abi(callers_sp), Z_SP);2019}20202021add2reg(Z_SP, offset, newSP);2022z_stg(fp, _z_abi(callers_sp), Z_SP);2023}20242025// Resize_frame with SP(new) = [newSP].2026// load_fp == true only indicates that fp is not pre-filled with the frame pointer.2027// It does not guarantee that fp contains the frame pointer at the end.2028void MacroAssembler::resize_frame_absolute(Register newSP, Register fp, bool load_fp) {2029assert_different_registers(newSP, fp, Z_SP);20302031if (load_fp) {2032z_lg(fp, _z_abi(callers_sp), Z_SP); // need to use load/store.2033}20342035z_lgr(Z_SP, newSP);2036if (newSP != Z_R0) { // make sure we generate correct code, no matter what register newSP uses.2037z_stg(fp, _z_abi(callers_sp), newSP);2038} else {2039z_stg(fp, _z_abi(callers_sp), Z_SP);2040}2041}20422043// Resize_frame with SP(new) = SP(old) + offset.2044void MacroAssembler::resize_frame(RegisterOrConstant offset, Register fp, bool load_fp) {2045assert_different_registers(fp, Z_SP);20462047if (load_fp) {2048z_lg(fp, _z_abi(callers_sp), Z_SP);2049}2050add64(Z_SP, offset);2051z_stg(fp, _z_abi(callers_sp), Z_SP);2052}20532054void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, bool bytes_with_inverted_sign) {2055#ifdef ASSERT2056assert_different_registers(bytes, old_sp, Z_SP);2057if (!copy_sp) {2058z_cgr(old_sp, Z_SP);2059asm_assert_eq("[old_sp]!=[Z_SP]", 0x211);2060}2061#endif2062if (copy_sp) { z_lgr(old_sp, Z_SP); }2063if (bytes_with_inverted_sign) {2064z_agr(Z_SP, bytes);2065} else {2066z_sgr(Z_SP, bytes); // Z_sgfr sufficient, but probably not faster.2067}2068z_stg(old_sp, _z_abi(callers_sp), Z_SP);2069}20702071unsigned int MacroAssembler::push_frame(unsigned int bytes, Register scratch) {2072long offset = Assembler::align(bytes, frame::alignment_in_bytes);2073assert(offset > 0, "should push a frame with positive size, size = %ld.", offset);2074assert(Displacement::is_validDisp(-offset), "frame size out of range, size = %ld", offset);20752076// We must not write outside the current stack bounds (given by Z_SP).2077// Thus, we have to first update Z_SP and then store the previous SP as stack linkage.2078// We rely on Z_R0 by default to be available as scratch.2079z_lgr(scratch, Z_SP);2080add2reg(Z_SP, -offset);2081z_stg(scratch, _z_abi(callers_sp), Z_SP);2082#ifdef ASSERT2083// Just make sure nobody uses the value in the default scratch register.2084// When another register is used, the caller might rely on it containing the frame pointer.2085if (scratch == Z_R0) {2086z_iihf(scratch, 0xbaadbabe);2087z_iilf(scratch, 0xdeadbeef);2088}2089#endif2090return offset;2091}20922093// Push a frame of size `bytes' plus abi160 on top.2094unsigned int MacroAssembler::push_frame_abi160(unsigned int bytes) {2095BLOCK_COMMENT("push_frame_abi160 {");2096unsigned int res = push_frame(bytes + frame::z_abi_160_size);2097BLOCK_COMMENT("} push_frame_abi160");2098return res;2099}21002101// Pop current C frame.2102void MacroAssembler::pop_frame() {2103BLOCK_COMMENT("pop_frame:");2104Assembler::z_lg(Z_SP, _z_abi(callers_sp), Z_SP);2105}21062107// Pop current C frame and restore return PC register (Z_R14).2108void MacroAssembler::pop_frame_restore_retPC(int frame_size_in_bytes) {2109BLOCK_COMMENT("pop_frame_restore_retPC:");2110int retPC_offset = _z_abi16(return_pc) + frame_size_in_bytes;2111// If possible, pop frame by add instead of load (a penny saved is a penny got :-).2112if (Displacement::is_validDisp(retPC_offset)) {2113z_lg(Z_R14, retPC_offset, Z_SP);2114add2reg(Z_SP, frame_size_in_bytes);2115} else {2116add2reg(Z_SP, frame_size_in_bytes);2117restore_return_pc();2118}2119}21202121void MacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) {2122if (allow_relocation) {2123call_c(entry_point);2124} else {2125call_c_static(entry_point);2126}2127}21282129void MacroAssembler::call_VM_leaf_base(address entry_point) {2130bool allow_relocation = true;2131call_VM_leaf_base(entry_point, allow_relocation);2132}21332134void MacroAssembler::call_VM_base(Register oop_result,2135Register last_java_sp,2136address entry_point,2137bool allow_relocation,2138bool check_exceptions) { // Defaults to true.2139// Allow_relocation indicates, if true, that the generated code shall2140// be fit for code relocation or referenced data relocation. In other2141// words: all addresses must be considered variable. PC-relative addressing2142// is not possible then.2143// On the other hand, if (allow_relocation == false), addresses and offsets2144// may be considered stable, enabling us to take advantage of some PC-relative2145// addressing tweaks. These might improve performance and reduce code size.21462147// Determine last_java_sp register.2148if (!last_java_sp->is_valid()) {2149last_java_sp = Z_SP; // Load Z_SP as SP.2150}21512152set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, Z_R1, allow_relocation);21532154// ARG1 must hold thread address.2155z_lgr(Z_ARG1, Z_thread);21562157address return_pc = NULL;2158if (allow_relocation) {2159return_pc = call_c(entry_point);2160} else {2161return_pc = call_c_static(entry_point);2162}21632164reset_last_Java_frame(allow_relocation);21652166// C++ interp handles this in the interpreter.2167check_and_handle_popframe(Z_thread);2168check_and_handle_earlyret(Z_thread);21692170// Check for pending exceptions.2171if (check_exceptions) {2172// Check for pending exceptions (java_thread is set upon return).2173load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));21742175// This used to conditionally jump to forward_exception however it is2176// possible if we relocate that the branch will not reach. So we must jump2177// around so we can always reach.21782179Label ok;2180z_bre(ok); // Bcondequal is the same as bcondZero.2181call_stub(StubRoutines::forward_exception_entry());2182bind(ok);2183}21842185// Get oop result if there is one and reset the value in the thread.2186if (oop_result->is_valid()) {2187get_vm_result(oop_result);2188}21892190_last_calls_return_pc = return_pc; // Wipe out other (error handling) calls.2191}21922193void MacroAssembler::call_VM_base(Register oop_result,2194Register last_java_sp,2195address entry_point,2196bool check_exceptions) { // Defaults to true.2197bool allow_relocation = true;2198call_VM_base(oop_result, last_java_sp, entry_point, allow_relocation, check_exceptions);2199}22002201// VM calls without explicit last_java_sp.22022203void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {2204// Call takes possible detour via InterpreterMacroAssembler.2205call_VM_base(oop_result, noreg, entry_point, true, check_exceptions);2206}22072208void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {2209// Z_ARG1 is reserved for the thread.2210lgr_if_needed(Z_ARG2, arg_1);2211call_VM(oop_result, entry_point, check_exceptions);2212}22132214void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {2215// Z_ARG1 is reserved for the thread.2216lgr_if_needed(Z_ARG2, arg_1);2217assert(arg_2 != Z_ARG2, "smashed argument");2218lgr_if_needed(Z_ARG3, arg_2);2219call_VM(oop_result, entry_point, check_exceptions);2220}22212222void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,2223Register arg_3, bool check_exceptions) {2224// Z_ARG1 is reserved for the thread.2225lgr_if_needed(Z_ARG2, arg_1);2226assert(arg_2 != Z_ARG2, "smashed argument");2227lgr_if_needed(Z_ARG3, arg_2);2228assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");2229lgr_if_needed(Z_ARG4, arg_3);2230call_VM(oop_result, entry_point, check_exceptions);2231}22322233// VM static calls without explicit last_java_sp.22342235void MacroAssembler::call_VM_static(Register oop_result, address entry_point, bool check_exceptions) {2236// Call takes possible detour via InterpreterMacroAssembler.2237call_VM_base(oop_result, noreg, entry_point, false, check_exceptions);2238}22392240void MacroAssembler::call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,2241Register arg_3, bool check_exceptions) {2242// Z_ARG1 is reserved for the thread.2243lgr_if_needed(Z_ARG2, arg_1);2244assert(arg_2 != Z_ARG2, "smashed argument");2245lgr_if_needed(Z_ARG3, arg_2);2246assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");2247lgr_if_needed(Z_ARG4, arg_3);2248call_VM_static(oop_result, entry_point, check_exceptions);2249}22502251// VM calls with explicit last_java_sp.22522253void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions) {2254// Call takes possible detour via InterpreterMacroAssembler.2255call_VM_base(oop_result, last_java_sp, entry_point, true, check_exceptions);2256}22572258void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {2259// Z_ARG1 is reserved for the thread.2260lgr_if_needed(Z_ARG2, arg_1);2261call_VM(oop_result, last_java_sp, entry_point, check_exceptions);2262}22632264void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,2265Register arg_2, bool check_exceptions) {2266// Z_ARG1 is reserved for the thread.2267lgr_if_needed(Z_ARG2, arg_1);2268assert(arg_2 != Z_ARG2, "smashed argument");2269lgr_if_needed(Z_ARG3, arg_2);2270call_VM(oop_result, last_java_sp, entry_point, check_exceptions);2271}22722273void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,2274Register arg_2, Register arg_3, bool check_exceptions) {2275// Z_ARG1 is reserved for the thread.2276lgr_if_needed(Z_ARG2, arg_1);2277assert(arg_2 != Z_ARG2, "smashed argument");2278lgr_if_needed(Z_ARG3, arg_2);2279assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");2280lgr_if_needed(Z_ARG4, arg_3);2281call_VM(oop_result, last_java_sp, entry_point, check_exceptions);2282}22832284// VM leaf calls.22852286void MacroAssembler::call_VM_leaf(address entry_point) {2287// Call takes possible detour via InterpreterMacroAssembler.2288call_VM_leaf_base(entry_point, true);2289}22902291void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {2292if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);2293call_VM_leaf(entry_point);2294}22952296void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {2297if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);2298assert(arg_2 != Z_ARG1, "smashed argument");2299if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);2300call_VM_leaf(entry_point);2301}23022303void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {2304if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);2305assert(arg_2 != Z_ARG1, "smashed argument");2306if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);2307assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");2308if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);2309call_VM_leaf(entry_point);2310}23112312// Static VM leaf calls.2313// Really static VM leaf calls are never patched.23142315void MacroAssembler::call_VM_leaf_static(address entry_point) {2316// Call takes possible detour via InterpreterMacroAssembler.2317call_VM_leaf_base(entry_point, false);2318}23192320void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1) {2321if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);2322call_VM_leaf_static(entry_point);2323}23242325void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2) {2326if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);2327assert(arg_2 != Z_ARG1, "smashed argument");2328if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);2329call_VM_leaf_static(entry_point);2330}23312332void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3) {2333if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);2334assert(arg_2 != Z_ARG1, "smashed argument");2335if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);2336assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");2337if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);2338call_VM_leaf_static(entry_point);2339}23402341// Don't use detour via call_c(reg).2342address MacroAssembler::call_c(address function_entry) {2343load_const(Z_R1, function_entry);2344return call(Z_R1);2345}23462347// Variant for really static (non-relocatable) calls which are never patched.2348address MacroAssembler::call_c_static(address function_entry) {2349load_absolute_address(Z_R1, function_entry);2350#if 0 // def ASSERT2351// Verify that call site did not move.2352load_const_optimized(Z_R0, function_entry);2353z_cgr(Z_R1, Z_R0);2354z_brc(bcondEqual, 3);2355z_illtrap(0xba);2356#endif2357return call(Z_R1);2358}23592360address MacroAssembler::call_c_opt(address function_entry) {2361bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */);2362_last_calls_return_pc = success ? pc() : NULL;2363return _last_calls_return_pc;2364}23652366// Identify a call_far_patchable instruction: LARL + LG + BASR2367//2368// nop ; optionally, if required for alignment2369// lgrl rx,A(TOC entry) ; PC-relative access into constant pool2370// basr Z_R14,rx ; end of this instruction must be aligned to a word boundary2371//2372// Code pattern will eventually get patched into variant2 (see below for detection code).2373//2374bool MacroAssembler::is_call_far_patchable_variant0_at(address instruction_addr) {2375address iaddr = instruction_addr;23762377// Check for the actual load instruction.2378if (!is_load_const_from_toc(iaddr)) { return false; }2379iaddr += load_const_from_toc_size();23802381// Check for the call (BASR) instruction, finally.2382assert(iaddr-instruction_addr+call_byregister_size() == call_far_patchable_size(), "size mismatch");2383return is_call_byregister(iaddr);2384}23852386// Identify a call_far_patchable instruction: BRASL2387//2388// Code pattern to suits atomic patching:2389// nop ; Optionally, if required for alignment.2390// nop ... ; Multiple filler nops to compensate for size difference (variant0 is longer).2391// nop ; For code pattern detection: Prepend each BRASL with a nop.2392// brasl Z_R14,<reladdr> ; End of code must be 4-byte aligned !2393bool MacroAssembler::is_call_far_patchable_variant2_at(address instruction_addr) {2394const address call_addr = (address)((intptr_t)instruction_addr + call_far_patchable_size() - call_far_pcrelative_size());23952396// Check for correct number of leading nops.2397address iaddr;2398for (iaddr = instruction_addr; iaddr < call_addr; iaddr += nop_size()) {2399if (!is_z_nop(iaddr)) { return false; }2400}2401assert(iaddr == call_addr, "sanity");24022403// --> Check for call instruction.2404if (is_call_far_pcrelative(call_addr)) {2405assert(call_addr-instruction_addr+call_far_pcrelative_size() == call_far_patchable_size(), "size mismatch");2406return true;2407}24082409return false;2410}24112412// Emit a NOT mt-safely patchable 64 bit absolute call.2413// If toc_offset == -2, then the destination of the call (= target) is emitted2414// to the constant pool and a runtime_call relocation is added2415// to the code buffer.2416// If toc_offset != -2, target must already be in the constant pool at2417// _ctableStart+toc_offset (a caller can retrieve toc_offset2418// from the runtime_call relocation).2419// Special handling of emitting to scratch buffer when there is no constant pool.2420// Slightly changed code pattern. We emit an additional nop if we would2421// not end emitting at a word aligned address. This is to ensure2422// an atomically patchable displacement in brasl instructions.2423//2424// A call_far_patchable comes in different flavors:2425// - LARL(CP) / LG(CP) / BR (address in constant pool, access via CP register)2426// - LGRL(CP) / BR (address in constant pool, pc-relative accesss)2427// - BRASL (relative address of call target coded in instruction)2428// All flavors occupy the same amount of space. Length differences are compensated2429// by leading nops, such that the instruction sequence always ends at the same2430// byte offset. This is required to keep the return offset constant.2431// Furthermore, the return address (the end of the instruction sequence) is forced2432// to be on a 4-byte boundary. This is required for atomic patching, should we ever2433// need to patch the call target of the BRASL flavor.2434// RETURN value: false, if no constant pool entry could be allocated, true otherwise.2435bool MacroAssembler::call_far_patchable(address target, int64_t tocOffset) {2436// Get current pc and ensure word alignment for end of instr sequence.2437const address start_pc = pc();2438const intptr_t start_off = offset();2439assert(!call_far_patchable_requires_alignment_nop(start_pc), "call_far_patchable requires aligned address");2440const ptrdiff_t dist = (ptrdiff_t)(target - (start_pc + 2)); // Prepend each BRASL with a nop.2441const bool emit_target_to_pool = (tocOffset == -2) && !code_section()->scratch_emit();2442const bool emit_relative_call = !emit_target_to_pool &&2443RelAddr::is_in_range_of_RelAddr32(dist) &&2444ReoptimizeCallSequences &&2445!code_section()->scratch_emit();24462447if (emit_relative_call) {2448// Add padding to get the same size as below.2449const unsigned int padding = call_far_patchable_size() - call_far_pcrelative_size();2450unsigned int current_padding;2451for (current_padding = 0; current_padding < padding; current_padding += nop_size()) { z_nop(); }2452assert(current_padding == padding, "sanity");24532454// relative call: len = 2(nop) + 6 (brasl)2455// CodeBlob resize cannot occur in this case because2456// this call is emitted into pre-existing space.2457z_nop(); // Prepend each BRASL with a nop.2458z_brasl(Z_R14, target);2459} else {2460// absolute call: Get address from TOC.2461// len = (load TOC){6|0} + (load from TOC){6} + (basr){2} = {14|8}2462if (emit_target_to_pool) {2463// When emitting the call for the first time, we do not need to use2464// the pc-relative version. It will be patched anyway, when the code2465// buffer is copied.2466// Relocation is not needed when !ReoptimizeCallSequences.2467relocInfo::relocType rt = ReoptimizeCallSequences ? relocInfo::runtime_call_w_cp_type : relocInfo::none;2468AddressLiteral dest(target, rt);2469// Store_oop_in_toc() adds dest to the constant table. As side effect, this kills2470// inst_mark(). Reset if possible.2471bool reset_mark = (inst_mark() == pc());2472tocOffset = store_oop_in_toc(dest);2473if (reset_mark) { set_inst_mark(); }2474if (tocOffset == -1) {2475return false; // Couldn't create constant pool entry.2476}2477}2478assert(offset() == start_off, "emit no code before this point!");24792480address tocPos = pc() + tocOffset;2481if (emit_target_to_pool) {2482tocPos = code()->consts()->start() + tocOffset;2483}2484load_long_pcrelative(Z_R14, tocPos);2485z_basr(Z_R14, Z_R14);2486}24872488#ifdef ASSERT2489// Assert that we can identify the emitted call.2490assert(is_call_far_patchable_at(addr_at(start_off)), "can't identify emitted call");2491assert(offset() == start_off+call_far_patchable_size(), "wrong size");24922493if (emit_target_to_pool) {2494assert(get_dest_of_call_far_patchable_at(addr_at(start_off), code()->consts()->start()) == target,2495"wrong encoding of dest address");2496}2497#endif2498return true; // success2499}25002501// Identify a call_far_patchable instruction.2502// For more detailed information see header comment of call_far_patchable.2503bool MacroAssembler::is_call_far_patchable_at(address instruction_addr) {2504return is_call_far_patchable_variant2_at(instruction_addr) || // short version: BRASL2505is_call_far_patchable_variant0_at(instruction_addr); // long version LARL + LG + BASR2506}25072508// Does the call_far_patchable instruction use a pc-relative encoding2509// of the call destination?2510bool MacroAssembler::is_call_far_patchable_pcrelative_at(address instruction_addr) {2511// Variant 2 is pc-relative.2512return is_call_far_patchable_variant2_at(instruction_addr);2513}25142515bool MacroAssembler::is_call_far_pcrelative(address instruction_addr) {2516// Prepend each BRASL with a nop.2517return is_z_nop(instruction_addr) && is_z_brasl(instruction_addr + nop_size()); // Match at position after one nop required.2518}25192520// Set destination address of a call_far_patchable instruction.2521void MacroAssembler::set_dest_of_call_far_patchable_at(address instruction_addr, address dest, int64_t tocOffset) {2522ResourceMark rm;25232524// Now that CP entry is verified, patch call to a pc-relative call (if circumstances permit).2525int code_size = MacroAssembler::call_far_patchable_size();2526CodeBuffer buf(instruction_addr, code_size);2527MacroAssembler masm(&buf);2528masm.call_far_patchable(dest, tocOffset);2529ICache::invalidate_range(instruction_addr, code_size); // Empty on z.2530}25312532// Get dest address of a call_far_patchable instruction.2533address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_addr, address ctable) {2534// Dynamic TOC: absolute address in constant pool.2535// Check variant2 first, it is more frequent.25362537// Relative address encoded in call instruction.2538if (is_call_far_patchable_variant2_at(instruction_addr)) {2539return MacroAssembler::get_target_addr_pcrel(instruction_addr + nop_size()); // Prepend each BRASL with a nop.25402541// Absolute address in constant pool.2542} else if (is_call_far_patchable_variant0_at(instruction_addr)) {2543address iaddr = instruction_addr;25442545long tocOffset = get_load_const_from_toc_offset(iaddr);2546address tocLoc = iaddr + tocOffset;2547return *(address *)(tocLoc);2548} else {2549fprintf(stderr, "MacroAssembler::get_dest_of_call_far_patchable_at has a problem at %p:\n", instruction_addr);2550fprintf(stderr, "not a call_far_patchable: %16.16lx %16.16lx, len = %d\n",2551*(unsigned long*)instruction_addr,2552*(unsigned long*)(instruction_addr+8),2553call_far_patchable_size());2554Disassembler::decode(instruction_addr, instruction_addr+call_far_patchable_size());2555ShouldNotReachHere();2556return NULL;2557}2558}25592560void MacroAssembler::align_call_far_patchable(address pc) {2561if (call_far_patchable_requires_alignment_nop(pc)) { z_nop(); }2562}25632564void MacroAssembler::check_and_handle_earlyret(Register java_thread) {2565}25662567void MacroAssembler::check_and_handle_popframe(Register java_thread) {2568}25692570// Read from the polling page.2571// Use TM or TMY instruction, depending on read offset.2572// offset = 0: Use TM, safepoint polling.2573// offset < 0: Use TMY, profiling safepoint polling.2574void MacroAssembler::load_from_polling_page(Register polling_page_address, int64_t offset) {2575if (Immediate::is_uimm12(offset)) {2576z_tm(offset, polling_page_address, mask_safepoint);2577} else {2578z_tmy(offset, polling_page_address, mask_profiling);2579}2580}25812582// Check whether z_instruction is a read access to the polling page2583// which was emitted by load_from_polling_page(..).2584bool MacroAssembler::is_load_from_polling_page(address instr_loc) {2585unsigned long z_instruction;2586unsigned int ilen = get_instruction(instr_loc, &z_instruction);25872588if (ilen == 2) { return false; } // It's none of the allowed instructions.25892590if (ilen == 4) {2591if (!is_z_tm(z_instruction)) { return false; } // It's len=4, but not a z_tm. fail.25922593int ms = inv_mask(z_instruction,8,32); // mask2594int ra = inv_reg(z_instruction,16,32); // base register2595int ds = inv_uimm12(z_instruction); // displacement25962597if (!(ds == 0 && ra != 0 && ms == mask_safepoint)) {2598return false; // It's not a z_tm(0, ra, mask_safepoint). Fail.2599}26002601} else { /* if (ilen == 6) */26022603assert(!is_z_lg(z_instruction), "old form (LG) polling page access. Please fix and use TM(Y).");26042605if (!is_z_tmy(z_instruction)) { return false; } // It's len=6, but not a z_tmy. fail.26062607int ms = inv_mask(z_instruction,8,48); // mask2608int ra = inv_reg(z_instruction,16,48); // base register2609int ds = inv_simm20(z_instruction); // displacement2610}26112612return true;2613}26142615// Extract poll address from instruction and ucontext.2616address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) {2617assert(ucontext != NULL, "must have ucontext");2618ucontext_t* uc = (ucontext_t*) ucontext;2619unsigned long z_instruction;2620unsigned int ilen = get_instruction(instr_loc, &z_instruction);26212622if (ilen == 4 && is_z_tm(z_instruction)) {2623int ra = inv_reg(z_instruction, 16, 32); // base register2624int ds = inv_uimm12(z_instruction); // displacement2625address addr = (address)uc->uc_mcontext.gregs[ra];2626return addr + ds;2627} else if (ilen == 6 && is_z_tmy(z_instruction)) {2628int ra = inv_reg(z_instruction, 16, 48); // base register2629int ds = inv_simm20(z_instruction); // displacement2630address addr = (address)uc->uc_mcontext.gregs[ra];2631return addr + ds;2632}26332634ShouldNotReachHere();2635return NULL;2636}26372638// Extract poll register from instruction.2639uint MacroAssembler::get_poll_register(address instr_loc) {2640unsigned long z_instruction;2641unsigned int ilen = get_instruction(instr_loc, &z_instruction);26422643if (ilen == 4 && is_z_tm(z_instruction)) {2644return (uint)inv_reg(z_instruction, 16, 32); // base register2645} else if (ilen == 6 && is_z_tmy(z_instruction)) {2646return (uint)inv_reg(z_instruction, 16, 48); // base register2647}26482649ShouldNotReachHere();2650return 0;2651}26522653void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) {2654const Address poll_byte_addr(Z_thread, in_bytes(JavaThread::polling_word_offset()) + 7 /* Big Endian */);2655// Armed page has poll_bit set.2656z_tm(poll_byte_addr, SafepointMechanism::poll_bit());2657z_brnaz(slow_path);2658}26592660// Don't rely on register locking, always use Z_R1 as scratch register instead.2661void MacroAssembler::bang_stack_with_offset(int offset) {2662// Stack grows down, caller passes positive offset.2663assert(offset > 0, "must bang with positive offset");2664if (Displacement::is_validDisp(-offset)) {2665z_tmy(-offset, Z_SP, mask_stackbang);2666} else {2667add2reg(Z_R1, -offset, Z_SP); // Do not destroy Z_SP!!!2668z_tm(0, Z_R1, mask_stackbang); // Just banging.2669}2670}26712672void MacroAssembler::reserved_stack_check(Register return_pc) {2673// Test if reserved zone needs to be enabled.2674Label no_reserved_zone_enabling;2675assert(return_pc == Z_R14, "Return pc must be in R14 before z_br() to StackOverflow stub.");2676BLOCK_COMMENT("reserved_stack_check {");26772678z_clg(Z_SP, Address(Z_thread, JavaThread::reserved_stack_activation_offset()));2679z_brl(no_reserved_zone_enabling);26802681// Enable reserved zone again, throw stack overflow exception.2682save_return_pc();2683push_frame_abi160(0);2684call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), Z_thread);2685pop_frame();2686restore_return_pc();26872688load_const_optimized(Z_R1, StubRoutines::throw_delayed_StackOverflowError_entry());2689// Don't use call() or z_basr(), they will invalidate Z_R14 which contains the return pc.2690z_br(Z_R1);26912692should_not_reach_here();26932694bind(no_reserved_zone_enabling);2695BLOCK_COMMENT("} reserved_stack_check");2696}26972698// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.2699void MacroAssembler::tlab_allocate(Register obj,2700Register var_size_in_bytes,2701int con_size_in_bytes,2702Register t1,2703Label& slow_case) {2704assert_different_registers(obj, var_size_in_bytes, t1);2705Register end = t1;2706Register thread = Z_thread;27072708z_lg(obj, Address(thread, JavaThread::tlab_top_offset()));2709if (var_size_in_bytes == noreg) {2710z_lay(end, Address(obj, con_size_in_bytes));2711} else {2712z_lay(end, Address(obj, var_size_in_bytes));2713}2714z_cg(end, Address(thread, JavaThread::tlab_end_offset()));2715branch_optimized(bcondHigh, slow_case);27162717// Update the tlab top pointer.2718z_stg(end, Address(thread, JavaThread::tlab_top_offset()));27192720// Recover var_size_in_bytes if necessary.2721if (var_size_in_bytes == end) {2722z_sgr(var_size_in_bytes, obj);2723}2724}27252726// Emitter for interface method lookup.2727// input: recv_klass, intf_klass, itable_index2728// output: method_result2729// kills: itable_index, temp1_reg, Z_R0, Z_R12730// TODO: Temp2_reg is unused. we may use this emitter also in the itable stubs.2731// If the register is still not needed then, remove it.2732void MacroAssembler::lookup_interface_method(Register recv_klass,2733Register intf_klass,2734RegisterOrConstant itable_index,2735Register method_result,2736Register temp1_reg,2737Label& no_such_interface,2738bool return_method) {27392740const Register vtable_len = temp1_reg; // Used to compute itable_entry_addr.2741const Register itable_entry_addr = Z_R1_scratch;2742const Register itable_interface = Z_R0_scratch;27432744BLOCK_COMMENT("lookup_interface_method {");27452746// Load start of itable entries into itable_entry_addr.2747z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));2748z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));27492750// Loop over all itable entries until desired interfaceOop(Rinterface) found.2751const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());27522753add2reg_with_index(itable_entry_addr,2754vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),2755recv_klass, vtable_len);27562757const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;2758Label search;27592760bind(search);27612762// Handle IncompatibleClassChangeError.2763// If the entry is NULL then we've reached the end of the table2764// without finding the expected interface, so throw an exception.2765load_and_test_long(itable_interface, Address(itable_entry_addr));2766z_bre(no_such_interface);27672768add2reg(itable_entry_addr, itable_offset_search_inc);2769z_cgr(itable_interface, intf_klass);2770z_brne(search);27712772// Entry found and itable_entry_addr points to it, get offset of vtable for interface.2773if (return_method) {2774const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -2775itableOffsetEntry::interface_offset_in_bytes()) -2776itable_offset_search_inc;27772778// Compute itableMethodEntry and get method and entry point2779// we use addressing with index and displacement, since the formula2780// for computing the entry's offset has a fixed and a dynamic part,2781// the latter depending on the matched interface entry and on the case,2782// that the itable index has been passed as a register, not a constant value.2783int method_offset = itableMethodEntry::method_offset_in_bytes();2784// Fixed part (displacement), common operand.2785Register itable_offset = method_result; // Dynamic part (index register).27862787if (itable_index.is_register()) {2788// Compute the method's offset in that register, for the formula, see the2789// else-clause below.2790z_sllg(itable_offset, itable_index.as_register(), exact_log2(itableMethodEntry::size() * wordSize));2791z_agf(itable_offset, vtable_offset_offset, itable_entry_addr);2792} else {2793// Displacement increases.2794method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant();27952796// Load index from itable.2797z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr);2798}27992800// Finally load the method's oop.2801z_lg(method_result, method_offset, itable_offset, recv_klass);2802}2803BLOCK_COMMENT("} lookup_interface_method");2804}28052806// Lookup for virtual method invocation.2807void MacroAssembler::lookup_virtual_method(Register recv_klass,2808RegisterOrConstant vtable_index,2809Register method_result) {2810assert_different_registers(recv_klass, vtable_index.register_or_noreg());2811assert(vtableEntry::size() * wordSize == wordSize,2812"else adjust the scaling in the code below");28132814BLOCK_COMMENT("lookup_virtual_method {");28152816const int base = in_bytes(Klass::vtable_start_offset());28172818if (vtable_index.is_constant()) {2819// Load with base + disp.2820Address vtable_entry_addr(recv_klass,2821vtable_index.as_constant() * wordSize +2822base +2823vtableEntry::method_offset_in_bytes());28242825z_lg(method_result, vtable_entry_addr);2826} else {2827// Shift index properly and load with base + index + disp.2828Register vindex = vtable_index.as_register();2829Address vtable_entry_addr(recv_klass, vindex,2830base + vtableEntry::method_offset_in_bytes());28312832z_sllg(vindex, vindex, exact_log2(wordSize));2833z_lg(method_result, vtable_entry_addr);2834}2835BLOCK_COMMENT("} lookup_virtual_method");2836}28372838// Factor out code to call ic_miss_handler.2839// Generate code to call the inline cache miss handler.2840//2841// In most cases, this code will be generated out-of-line.2842// The method parameters are intended to provide some variability.2843// ICM - Label which has to be bound to the start of useful code (past any traps).2844// trapMarker - Marking byte for the generated illtrap instructions (if any).2845// Any value except 0x00 is supported.2846// = 0x00 - do not generate illtrap instructions.2847// use nops to fill ununsed space.2848// requiredSize - required size of the generated code. If the actually2849// generated code is smaller, use padding instructions to fill up.2850// = 0 - no size requirement, no padding.2851// scratch - scratch register to hold branch target address.2852//2853// The method returns the code offset of the bound label.2854unsigned int MacroAssembler::call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch) {2855intptr_t startOffset = offset();28562857// Prevent entry at content_begin().2858if (trapMarker != 0) {2859z_illtrap(trapMarker);2860}28612862// Load address of inline cache miss code into scratch register2863// and branch to cache miss handler.2864BLOCK_COMMENT("IC miss handler {");2865BIND(ICM);2866unsigned int labelOffset = offset();2867AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());28682869load_const_optimized(scratch, icmiss);2870z_br(scratch);28712872// Fill unused space.2873if (requiredSize > 0) {2874while ((offset() - startOffset) < requiredSize) {2875if (trapMarker == 0) {2876z_nop();2877} else {2878z_illtrap(trapMarker);2879}2880}2881}2882BLOCK_COMMENT("} IC miss handler");2883return labelOffset;2884}28852886void MacroAssembler::nmethod_UEP(Label& ic_miss) {2887Register ic_reg = Z_inline_cache;2888int klass_offset = oopDesc::klass_offset_in_bytes();2889if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {2890if (VM_Version::has_CompareBranch()) {2891z_cgij(Z_ARG1, 0, Assembler::bcondEqual, ic_miss);2892} else {2893z_ltgr(Z_ARG1, Z_ARG1);2894z_bre(ic_miss);2895}2896}2897// Compare cached class against klass from receiver.2898compare_klass_ptr(ic_reg, klass_offset, Z_ARG1, false);2899z_brne(ic_miss);2900}29012902void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,2903Register super_klass,2904Register temp1_reg,2905Label* L_success,2906Label* L_failure,2907Label* L_slow_path,2908RegisterOrConstant super_check_offset) {29092910const int sc_offset = in_bytes(Klass::secondary_super_cache_offset());2911const int sco_offset = in_bytes(Klass::super_check_offset_offset());29122913bool must_load_sco = (super_check_offset.constant_or_zero() == -1);2914bool need_slow_path = (must_load_sco ||2915super_check_offset.constant_or_zero() == sc_offset);29162917// Input registers must not overlap.2918assert_different_registers(sub_klass, super_klass, temp1_reg);2919if (super_check_offset.is_register()) {2920assert_different_registers(sub_klass, super_klass,2921super_check_offset.as_register());2922} else if (must_load_sco) {2923assert(temp1_reg != noreg, "supply either a temp or a register offset");2924}29252926const Register Rsuper_check_offset = temp1_reg;29272928NearLabel L_fallthrough;2929int label_nulls = 0;2930if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }2931if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }2932if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }2933assert(label_nulls <= 1 ||2934(L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),2935"at most one NULL in the batch, usually");29362937BLOCK_COMMENT("check_klass_subtype_fast_path {");2938// If the pointers are equal, we are done (e.g., String[] elements).2939// This self-check enables sharing of secondary supertype arrays among2940// non-primary types such as array-of-interface. Otherwise, each such2941// type would need its own customized SSA.2942// We move this check to the front of the fast path because many2943// type checks are in fact trivially successful in this manner,2944// so we get a nicely predicted branch right at the start of the check.2945compare64_and_branch(sub_klass, super_klass, bcondEqual, *L_success);29462947// Check the supertype display, which is uint.2948if (must_load_sco) {2949z_llgf(Rsuper_check_offset, sco_offset, super_klass);2950super_check_offset = RegisterOrConstant(Rsuper_check_offset);2951}2952Address super_check_addr(sub_klass, super_check_offset, 0);2953z_cg(super_klass, super_check_addr); // compare w/ displayed supertype29542955// This check has worked decisively for primary supers.2956// Secondary supers are sought in the super_cache ('super_cache_addr').2957// (Secondary supers are interfaces and very deeply nested subtypes.)2958// This works in the same check above because of a tricky aliasing2959// between the super_cache and the primary super display elements.2960// (The 'super_check_addr' can address either, as the case requires.)2961// Note that the cache is updated below if it does not help us find2962// what we need immediately.2963// So if it was a primary super, we can just fail immediately.2964// Otherwise, it's the slow path for us (no success at this point).29652966// Hacked jmp, which may only be used just before L_fallthrough.2967#define final_jmp(label) \2968if (&(label) == &L_fallthrough) { /*do nothing*/ } \2969else { branch_optimized(Assembler::bcondAlways, label); } /*omit semicolon*/29702971if (super_check_offset.is_register()) {2972branch_optimized(Assembler::bcondEqual, *L_success);2973z_cfi(super_check_offset.as_register(), sc_offset);2974if (L_failure == &L_fallthrough) {2975branch_optimized(Assembler::bcondEqual, *L_slow_path);2976} else {2977branch_optimized(Assembler::bcondNotEqual, *L_failure);2978final_jmp(*L_slow_path);2979}2980} else if (super_check_offset.as_constant() == sc_offset) {2981// Need a slow path; fast failure is impossible.2982if (L_slow_path == &L_fallthrough) {2983branch_optimized(Assembler::bcondEqual, *L_success);2984} else {2985branch_optimized(Assembler::bcondNotEqual, *L_slow_path);2986final_jmp(*L_success);2987}2988} else {2989// No slow path; it's a fast decision.2990if (L_failure == &L_fallthrough) {2991branch_optimized(Assembler::bcondEqual, *L_success);2992} else {2993branch_optimized(Assembler::bcondNotEqual, *L_failure);2994final_jmp(*L_success);2995}2996}29972998bind(L_fallthrough);2999#undef local_brc3000#undef final_jmp3001BLOCK_COMMENT("} check_klass_subtype_fast_path");3002// fallthru (to slow path)3003}30043005void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass,3006Register Rsuperklass,3007Register Rarray_ptr, // tmp3008Register Rlength, // tmp3009Label* L_success,3010Label* L_failure) {3011// Input registers must not overlap.3012// Also check for R1 which is explicitely used here.3013assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength);3014NearLabel L_fallthrough;3015int label_nulls = 0;3016if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }3017if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }3018assert(label_nulls <= 1, "at most one NULL in the batch");30193020const int ss_offset = in_bytes(Klass::secondary_supers_offset());3021const int sc_offset = in_bytes(Klass::secondary_super_cache_offset());30223023const int length_offset = Array<Klass*>::length_offset_in_bytes();3024const int base_offset = Array<Klass*>::base_offset_in_bytes();30253026// Hacked jmp, which may only be used just before L_fallthrough.3027#define final_jmp(label) \3028if (&(label) == &L_fallthrough) { /*do nothing*/ } \3029else branch_optimized(Assembler::bcondAlways, label) /*omit semicolon*/30303031NearLabel loop_iterate, loop_count, match;30323033BLOCK_COMMENT("check_klass_subtype_slow_path {");3034z_lg(Rarray_ptr, ss_offset, Rsubklass);30353036load_and_test_int(Rlength, Address(Rarray_ptr, length_offset));3037branch_optimized(Assembler::bcondZero, *L_failure);30383039// Oops in table are NO MORE compressed.3040z_cg(Rsuperklass, base_offset, Rarray_ptr); // Check array element for match.3041z_bre(match); // Shortcut for array length = 1.30423043// No match yet, so we must walk the array's elements.3044z_lngfr(Rlength, Rlength);3045z_sllg(Rlength, Rlength, LogBytesPerWord); // -#bytes of cache array3046z_llill(Z_R1, BytesPerWord); // Set increment/end index.3047add2reg(Rlength, 2 * BytesPerWord); // start index = -(n-2)*BytesPerWord3048z_slgr(Rarray_ptr, Rlength); // start addr: += (n-2)*BytesPerWord3049z_bru(loop_count);30503051BIND(loop_iterate);3052z_cg(Rsuperklass, base_offset, Rlength, Rarray_ptr); // Check array element for match.3053z_bre(match);3054BIND(loop_count);3055z_brxlg(Rlength, Z_R1, loop_iterate);30563057// Rsuperklass not found among secondary super classes -> failure.3058branch_optimized(Assembler::bcondAlways, *L_failure);30593060// Got a hit. Return success (zero result). Set cache.3061// Cache load doesn't happen here. For speed it is directly emitted by the compiler.30623063BIND(match);30643065z_stg(Rsuperklass, sc_offset, Rsubklass); // Save result to cache.30663067final_jmp(*L_success);30683069// Exit to the surrounding code.3070BIND(L_fallthrough);3071#undef local_brc3072#undef final_jmp3073BLOCK_COMMENT("} check_klass_subtype_slow_path");3074}30753076// Emitter for combining fast and slow path.3077void MacroAssembler::check_klass_subtype(Register sub_klass,3078Register super_klass,3079Register temp1_reg,3080Register temp2_reg,3081Label& L_success) {3082NearLabel failure;3083BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name()));3084check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg,3085&L_success, &failure, NULL);3086check_klass_subtype_slow_path(sub_klass, super_klass,3087temp1_reg, temp2_reg, &L_success, NULL);3088BIND(failure);3089BLOCK_COMMENT("} check_klass_subtype");3090}30913092void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) {3093assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required");30943095Label L_fallthrough;3096if (L_fast_path == NULL) {3097L_fast_path = &L_fallthrough;3098} else if (L_slow_path == NULL) {3099L_slow_path = &L_fallthrough;3100}31013102// Fast path check: class is fully initialized3103z_cli(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);3104z_bre(*L_fast_path);31053106// Fast path check: current thread is initializer thread3107z_cg(thread, Address(klass, InstanceKlass::init_thread_offset()));3108if (L_slow_path == &L_fallthrough) {3109z_bre(*L_fast_path);3110} else if (L_fast_path == &L_fallthrough) {3111z_brne(*L_slow_path);3112} else {3113Unimplemented();3114}31153116bind(L_fallthrough);3117}31183119// Increment a counter at counter_address when the eq condition code is3120// set. Kills registers tmp1_reg and tmp2_reg and preserves the condition code.3121void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg) {3122Label l;3123z_brne(l);3124load_const(tmp1_reg, counter_address);3125add2mem_32(Address(tmp1_reg), 1, tmp2_reg);3126z_cr(tmp1_reg, tmp1_reg); // Set cc to eq.3127bind(l);3128}31293130// Semantics are dependent on the slow_case label:3131// If the slow_case label is not NULL, failure to biased-lock the object3132// transfers control to the location of the slow_case label. If the3133// object could be biased-locked, control is transferred to the done label.3134// The condition code is unpredictable.3135//3136// If the slow_case label is NULL, failure to biased-lock the object results3137// in a transfer of control to the done label with a condition code of not_equal.3138// If the biased-lock could be successfully obtained, control is transfered to3139// the done label with a condition code of equal.3140// It is mandatory to react on the condition code At the done label.3141//3142void MacroAssembler::biased_locking_enter(Register obj_reg,3143Register mark_reg,3144Register temp_reg,3145Register temp2_reg, // May be Z_RO!3146Label &done,3147Label *slow_case) {3148assert(UseBiasedLocking, "why call this otherwise?");3149assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);31503151Label cas_label; // Try, if implemented, CAS locking. Fall thru to slow path otherwise.31523153BLOCK_COMMENT("biased_locking_enter {");31543155// Biased locking3156// See whether the lock is currently biased toward our thread and3157// whether the epoch is still valid.3158// Note that the runtime guarantees sufficient alignment of JavaThread3159// pointers to allow age to be placed into low bits.3160assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits,3161"biased locking makes assumptions about bit layout");3162z_lr(temp_reg, mark_reg);3163z_nilf(temp_reg, markWord::biased_lock_mask_in_place);3164z_chi(temp_reg, markWord::biased_lock_pattern);3165z_brne(cas_label); // Try cas if object is not biased, i.e. cannot be biased locked.31663167load_prototype_header(temp_reg, obj_reg);3168load_const_optimized(temp2_reg, ~((int) markWord::age_mask_in_place));31693170z_ogr(temp_reg, Z_thread);3171z_xgr(temp_reg, mark_reg);3172z_ngr(temp_reg, temp2_reg);3173if (PrintBiasedLockingStatistics) {3174increment_counter_eq((address) BiasedLocking::biased_lock_entry_count_addr(), mark_reg, temp2_reg);3175// Restore mark_reg.3176z_lg(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);3177}3178branch_optimized(Assembler::bcondEqual, done); // Biased lock obtained, return success.31793180Label try_revoke_bias;3181Label try_rebias;3182Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());31833184//----------------------------------------------------------------------------3185// At this point we know that the header has the bias pattern and3186// that we are not the bias owner in the current epoch. We need to3187// figure out more details about the state of the header in order to3188// know what operations can be legally performed on the object's3189// header.31903191// If the low three bits in the xor result aren't clear, that means3192// the prototype header is no longer biased and we have to revoke3193// the bias on this object.3194z_tmll(temp_reg, markWord::biased_lock_mask_in_place);3195z_brnaz(try_revoke_bias);31963197// Biasing is still enabled for this data type. See whether the3198// epoch of the current bias is still valid, meaning that the epoch3199// bits of the mark word are equal to the epoch bits of the3200// prototype header. (Note that the prototype header's epoch bits3201// only change at a safepoint.) If not, attempt to rebias the object3202// toward the current thread. Note that we must be absolutely sure3203// that the current epoch is invalid in order to do this because3204// otherwise the manipulations it performs on the mark word are3205// illegal.3206z_tmll(temp_reg, markWord::epoch_mask_in_place);3207z_brnaz(try_rebias);32083209//----------------------------------------------------------------------------3210// The epoch of the current bias is still valid but we know nothing3211// about the owner; it might be set or it might be clear. Try to3212// acquire the bias of the object using an atomic operation. If this3213// fails we will go in to the runtime to revoke the object's bias.3214// Note that we first construct the presumed unbiased header so we3215// don't accidentally blow away another thread's valid bias.3216z_nilf(mark_reg, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place |3217markWord::epoch_mask_in_place);3218z_lgr(temp_reg, Z_thread);3219z_llgfr(mark_reg, mark_reg);3220z_ogr(temp_reg, mark_reg);32213222assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");32233224z_csg(mark_reg, temp_reg, 0, obj_reg);32253226// If the biasing toward our thread failed, this means that3227// another thread succeeded in biasing it toward itself and we3228// need to revoke that bias. The revocation will occur in the3229// interpreter runtime in the slow case.32303231if (PrintBiasedLockingStatistics) {3232increment_counter_eq((address) BiasedLocking::anonymously_biased_lock_entry_count_addr(),3233temp_reg, temp2_reg);3234}3235if (slow_case != NULL) {3236branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way.3237}3238branch_optimized(Assembler::bcondAlways, done); // Biased lock status given in condition code.32393240//----------------------------------------------------------------------------3241bind(try_rebias);3242// At this point we know the epoch has expired, meaning that the3243// current "bias owner", if any, is actually invalid. Under these3244// circumstances _only_, we are allowed to use the current header's3245// value as the comparison value when doing the cas to acquire the3246// bias in the current epoch. In other words, we allow transfer of3247// the bias from one thread to another directly in this situation.32483249z_nilf(mark_reg, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);3250load_prototype_header(temp_reg, obj_reg);3251z_llgfr(mark_reg, mark_reg);32523253z_ogr(temp_reg, Z_thread);32543255assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");32563257z_csg(mark_reg, temp_reg, 0, obj_reg);32583259// If the biasing toward our thread failed, this means that3260// another thread succeeded in biasing it toward itself and we3261// need to revoke that bias. The revocation will occur in the3262// interpreter runtime in the slow case.32633264if (PrintBiasedLockingStatistics) {3265increment_counter_eq((address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg, temp2_reg);3266}3267if (slow_case != NULL) {3268branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way.3269}3270z_bru(done); // Biased lock status given in condition code.32713272//----------------------------------------------------------------------------3273bind(try_revoke_bias);3274// The prototype mark in the klass doesn't have the bias bit set any3275// more, indicating that objects of this data type are not supposed3276// to be biased any more. We are going to try to reset the mark of3277// this object to the prototype value and fall through to the3278// CAS-based locking scheme. Note that if our CAS fails, it means3279// that another thread raced us for the privilege of revoking the3280// bias of this particular object, so it's okay to continue in the3281// normal locking code.3282load_prototype_header(temp_reg, obj_reg);32833284assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");32853286z_csg(mark_reg, temp_reg, 0, obj_reg);32873288// Fall through to the normal CAS-based lock, because no matter what3289// the result of the above CAS, some thread must have succeeded in3290// removing the bias bit from the object's header.3291if (PrintBiasedLockingStatistics) {3292// z_cgr(mark_reg, temp2_reg);3293increment_counter_eq((address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg, temp2_reg);3294}32953296bind(cas_label);3297BLOCK_COMMENT("} biased_locking_enter");3298}32993300void MacroAssembler::biased_locking_exit(Register mark_addr, Register temp_reg, Label& done) {3301// Check for biased locking unlock case, which is a no-op3302// Note: we do not have to check the thread ID for two reasons.3303// First, the interpreter checks for IllegalMonitorStateException at3304// a higher level. Second, if the bias was revoked while we held the3305// lock, the object could not be rebiased toward another thread, so3306// the bias bit would be clear.3307BLOCK_COMMENT("biased_locking_exit {");33083309z_lg(temp_reg, 0, mark_addr);3310z_nilf(temp_reg, markWord::biased_lock_mask_in_place);33113312z_chi(temp_reg, markWord::biased_lock_pattern);3313z_bre(done);3314BLOCK_COMMENT("} biased_locking_exit");3315}33163317void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {3318Register displacedHeader = temp1;3319Register currentHeader = temp1;3320Register temp = temp2;3321NearLabel done, object_has_monitor;33223323BLOCK_COMMENT("compiler_fast_lock_object {");33243325// Load markWord from oop into mark.3326z_lg(displacedHeader, 0, oop);33273328if (DiagnoseSyncOnValueBasedClasses != 0) {3329load_klass(Z_R1_scratch, oop);3330z_l(Z_R1_scratch, Address(Z_R1_scratch, Klass::access_flags_offset()));3331assert((JVM_ACC_IS_VALUE_BASED_CLASS & 0xFFFF) == 0, "or change following instruction");3332z_nilh(Z_R1_scratch, JVM_ACC_IS_VALUE_BASED_CLASS >> 16);3333z_brne(done);3334}33353336if (try_bias) {3337biased_locking_enter(oop, displacedHeader, temp, Z_R0, done);3338}33393340// Handle existing monitor.3341// The object has an existing monitor iff (mark & monitor_value) != 0.3342guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");3343z_lr(temp, displacedHeader);3344z_nill(temp, markWord::monitor_value);3345z_brne(object_has_monitor);33463347// Set mark to markWord | markWord::unlocked_value.3348z_oill(displacedHeader, markWord::unlocked_value);33493350// Load Compare Value application register.33513352// Initialize the box (must happen before we update the object mark).3353z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);33543355// Memory Fence (in cmpxchgd)3356// Compare object markWord with mark and if equal exchange scratch1 with object markWord.33573358// If the compare-and-swap succeeded, then we found an unlocked object and we3359// have now locked it.3360z_csg(displacedHeader, box, 0, oop);3361assert(currentHeader==displacedHeader, "must be same register"); // Identified two registers from z/Architecture.3362z_bre(done);33633364// We did not see an unlocked object so try the fast recursive case.33653366z_sgr(currentHeader, Z_SP);3367load_const_optimized(temp, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));33683369z_ngr(currentHeader, temp);3370// z_brne(done);3371// z_release();3372z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box);33733374z_bru(done);33753376Register zero = temp;3377Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value.3378bind(object_has_monitor);3379// The object's monitor m is unlocked iff m->owner == NULL,3380// otherwise m->owner may contain a thread or a stack address.3381//3382// Try to CAS m->owner from NULL to current thread.3383z_lghi(zero, 0);3384// If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.3385z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);3386// Store a non-null value into the box.3387z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);3388#ifdef ASSERT3389z_brne(done);3390// We've acquired the monitor, check some invariants.3391// Invariant 1: _recursions should be 0.3392asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged,3393"monitor->_recursions should be 0", -1);3394z_ltgr(zero, zero); // Set CR=EQ.3395#endif3396bind(done);33973398BLOCK_COMMENT("} compiler_fast_lock_object");3399// If locking was successful, CR should indicate 'EQ'.3400// The compiler or the native wrapper generates a branch to the runtime call3401// _complete_monitor_locking_Java.3402}34033404void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {3405Register displacedHeader = temp1;3406Register currentHeader = temp2;3407Register temp = temp1;3408Register monitor = temp2;34093410Label done, object_has_monitor;34113412BLOCK_COMMENT("compiler_fast_unlock_object {");34133414if (try_bias) {3415biased_locking_exit(oop, currentHeader, done);3416}34173418// Find the lock address and load the displaced header from the stack.3419// if the displaced header is zero, we have a recursive unlock.3420load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes()));3421z_bre(done);34223423// Handle existing monitor.3424// The object has an existing monitor iff (mark & monitor_value) != 0.3425z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);3426guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");3427z_nill(currentHeader, markWord::monitor_value);3428z_brne(object_has_monitor);34293430// Check if it is still a light weight lock, this is true if we see3431// the stack address of the basicLock in the markWord of the object3432// copy box to currentHeader such that csg does not kill it.3433z_lgr(currentHeader, box);3434z_csg(currentHeader, displacedHeader, 0, oop);3435z_bru(done); // Csg sets CR as desired.34363437// Handle existing monitor.3438bind(object_has_monitor);3439z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop); // CurrentHeader is tagged with monitor_value set.3440load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));3441z_brne(done);3442load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));3443z_brne(done);3444load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));3445z_brne(done);3446load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));3447z_brne(done);3448z_release();3449z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader);34503451bind(done);34523453BLOCK_COMMENT("} compiler_fast_unlock_object");3454// flag == EQ indicates success3455// flag == NE indicates failure3456}34573458void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {3459BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();3460bs->resolve_jobject(this, value, tmp1, tmp2);3461}34623463// Last_Java_sp must comply to the rules in frame_s390.hpp.3464void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation) {3465BLOCK_COMMENT("set_last_Java_frame {");34663467// Always set last_Java_pc and flags first because once last_Java_sp3468// is visible has_last_Java_frame is true and users will look at the3469// rest of the fields. (Note: flags should always be zero before we3470// get here so doesn't need to be set.)34713472// Verify that last_Java_pc was zeroed on return to Java.3473if (allow_relocation) {3474asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()),3475Z_thread,3476"last_Java_pc not zeroed before leaving Java",34770x200);3478} else {3479asm_assert_mem8_is_zero_static(in_bytes(JavaThread::last_Java_pc_offset()),3480Z_thread,3481"last_Java_pc not zeroed before leaving Java",34820x200);3483}34843485// When returning from calling out from Java mode the frame anchor's3486// last_Java_pc will always be set to NULL. It is set here so that3487// if we are doing a call to native (not VM) that we capture the3488// known pc and don't have to rely on the native call having a3489// standard frame linkage where we can find the pc.3490if (last_Java_pc!=noreg) {3491z_stg(last_Java_pc, Address(Z_thread, JavaThread::last_Java_pc_offset()));3492}34933494// This membar release is not required on z/Architecture, since the sequence of stores3495// in maintained. Nevertheless, we leave it in to document the required ordering.3496// The implementation of z_release() should be empty.3497// z_release();34983499z_stg(last_Java_sp, Address(Z_thread, JavaThread::last_Java_sp_offset()));3500BLOCK_COMMENT("} set_last_Java_frame");3501}35023503void MacroAssembler::reset_last_Java_frame(bool allow_relocation) {3504BLOCK_COMMENT("reset_last_Java_frame {");35053506if (allow_relocation) {3507asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),3508Z_thread,3509"SP was not set, still zero",35100x202);3511} else {3512asm_assert_mem8_isnot_zero_static(in_bytes(JavaThread::last_Java_sp_offset()),3513Z_thread,3514"SP was not set, still zero",35150x202);3516}35173518// _last_Java_sp = 03519// Clearing storage must be atomic here, so don't use clear_mem()!3520store_const(Address(Z_thread, JavaThread::last_Java_sp_offset()), 0);35213522// _last_Java_pc = 03523store_const(Address(Z_thread, JavaThread::last_Java_pc_offset()), 0);35243525BLOCK_COMMENT("} reset_last_Java_frame");3526return;3527}35283529void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation) {3530assert_different_registers(sp, tmp1);35313532// We cannot trust that code generated by the C++ compiler saves R143533// to z_abi_160.return_pc, because sometimes it spills R14 using stmg at3534// z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).3535// Therefore we load the PC into tmp1 and let set_last_Java_frame() save3536// it into the frame anchor.3537get_PC(tmp1);3538set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1, allow_relocation);3539}35403541void MacroAssembler::set_thread_state(JavaThreadState new_state) {3542z_release();35433544assert(Immediate::is_uimm16(_thread_max_state), "enum value out of range for instruction");3545assert(sizeof(JavaThreadState) == sizeof(int), "enum value must have base type int");3546store_const(Address(Z_thread, JavaThread::thread_state_offset()), new_state, Z_R0, false);3547}35483549void MacroAssembler::get_vm_result(Register oop_result) {3550verify_thread();35513552z_lg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));3553clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(void*));35543555verify_oop(oop_result, FILE_AND_LINE);3556}35573558void MacroAssembler::get_vm_result_2(Register result) {3559verify_thread();35603561z_lg(result, Address(Z_thread, JavaThread::vm_result_2_offset()));3562clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(void*));3563}35643565// We require that C code which does not return a value in vm_result will3566// leave it undisturbed.3567void MacroAssembler::set_vm_result(Register oop_result) {3568z_stg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));3569}35703571// Explicit null checks (used for method handle code).3572void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) {3573if (!ImplicitNullChecks) {3574NearLabel ok;35753576compare64_and_branch(reg, (intptr_t) 0, Assembler::bcondNotEqual, ok);35773578// We just put the address into reg if it was 0 (tmp==Z_R0 is allowed so we can't use it for the address).3579address exception_entry = Interpreter::throw_NullPointerException_entry();3580load_absolute_address(reg, exception_entry);3581z_br(reg);35823583bind(ok);3584} else {3585if (needs_explicit_null_check((intptr_t)offset)) {3586// Provoke OS NULL exception if reg = NULL by3587// accessing M[reg] w/o changing any registers.3588z_lg(tmp, 0, reg);3589}3590// else3591// Nothing to do, (later) access of M[reg + offset]3592// will provoke OS NULL exception if reg = NULL.3593}3594}35953596//-------------------------------------3597// Compressed Klass Pointers3598//-------------------------------------35993600// Klass oop manipulations if compressed.3601void MacroAssembler::encode_klass_not_null(Register dst, Register src) {3602Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible.3603address base = CompressedKlassPointers::base();3604int shift = CompressedKlassPointers::shift();3605bool need_zero_extend = base != 0;3606assert(UseCompressedClassPointers, "only for compressed klass ptrs");36073608BLOCK_COMMENT("cKlass encoder {");36093610#ifdef ASSERT3611Label ok;3612z_tmll(current, KlassAlignmentInBytes-1); // Check alignment.3613z_brc(Assembler::bcondAllZero, ok);3614// The plain disassembler does not recognize illtrap. It instead displays3615// a 32-bit value. Issueing two illtraps assures the disassembler finds3616// the proper beginning of the next instruction.3617z_illtrap(0xee);3618z_illtrap(0xee);3619bind(ok);3620#endif36213622// Scale down the incoming klass pointer first.3623// We then can be sure we calculate an offset that fits into 32 bit.3624// More generally speaking: all subsequent calculations are purely 32-bit.3625if (shift != 0) {3626assert (LogKlassAlignmentInBytes == shift, "decode alg wrong");3627z_srlg(dst, current, shift);3628current = dst;3629}36303631if (base != NULL) {3632// Use scaled-down base address parts to match scaled-down klass pointer.3633unsigned int base_h = ((unsigned long)base)>>(32+shift);3634unsigned int base_l = (unsigned int)(((unsigned long)base)>>shift);36353636// General considerations:3637// - when calculating (current_h - base_h), all digits must cancel (become 0).3638// Otherwise, we would end up with a compressed klass pointer which doesn't3639// fit into 32-bit.3640// - Only bit#33 of the difference could potentially be non-zero. For that3641// to happen, (current_l < base_l) must hold. In this case, the subtraction3642// will create a borrow out of bit#32, nicely killing bit#33.3643// - With the above, we only need to consider current_l and base_l to3644// calculate the result.3645// - Both values are treated as unsigned. The unsigned subtraction is3646// replaced by adding (unsigned) the 2's complement of the subtrahend.36473648if (base_l == 0) {3649// - By theory, the calculation to be performed here (current_h - base_h) MUST3650// cancel all high-word bits. Otherwise, we would end up with an offset3651// (i.e. compressed klass pointer) that does not fit into 32 bit.3652// - current_l remains unchanged.3653// - Therefore, we can replace all calculation with just a3654// zero-extending load 32 to 64 bit.3655// - Even that can be replaced with a conditional load if dst != current.3656// (this is a local view. The shift step may have requested zero-extension).3657} else {3658if ((base_h == 0) && is_uimm(base_l, 31)) {3659// If we happen to find that (base_h == 0), and that base_l is within the range3660// which can be represented by a signed int, then we can use 64bit signed add with3661// (-base_l) as 32bit signed immediate operand. The add will take care of the3662// upper 32 bits of the result, saving us the need of an extra zero extension.3663// For base_l to be in the required range, it must not have the most significant3664// bit (aka sign bit) set.3665lgr_if_needed(dst, current); // no zero/sign extension in this case!3666z_agfi(dst, -(int)base_l); // base_l must be passed as signed.3667need_zero_extend = false;3668current = dst;3669} else {3670// To begin with, we may need to copy and/or zero-extend the register operand.3671// We have to calculate (current_l - base_l). Because there is no unsigend3672// subtract instruction with immediate operand, we add the 2's complement of base_l.3673if (need_zero_extend) {3674z_llgfr(dst, current);3675need_zero_extend = false;3676} else {3677llgfr_if_needed(dst, current);3678}3679current = dst;3680z_alfi(dst, -base_l);3681}3682}3683}36843685if (need_zero_extend) {3686// We must zero-extend the calculated result. It may have some leftover bits in3687// the hi-word because we only did optimized calculations.3688z_llgfr(dst, current);3689} else {3690llgfr_if_needed(dst, current); // zero-extension while copying comes at no extra cost.3691}36923693BLOCK_COMMENT("} cKlass encoder");3694}36953696// This function calculates the size of the code generated by3697// decode_klass_not_null(register dst, Register src)3698// when (Universe::heap() != NULL). Hence, if the instructions3699// it generates change, then this method needs to be updated.3700int MacroAssembler::instr_size_for_decode_klass_not_null() {3701address base = CompressedKlassPointers::base();3702int shift_size = CompressedKlassPointers::shift() == 0 ? 0 : 6; /* sllg */3703int addbase_size = 0;3704assert(UseCompressedClassPointers, "only for compressed klass ptrs");37053706if (base != NULL) {3707unsigned int base_h = ((unsigned long)base)>>32;3708unsigned int base_l = (unsigned int)((unsigned long)base);3709if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {3710addbase_size += 6; /* aih */3711} else if ((base_h == 0) && (base_l != 0)) {3712addbase_size += 6; /* algfi */3713} else {3714addbase_size += load_const_size();3715addbase_size += 4; /* algr */3716}3717}3718#ifdef ASSERT3719addbase_size += 10;3720addbase_size += 2; // Extra sigill.3721#endif3722return addbase_size + shift_size;3723}37243725// !!! If the instructions that get generated here change3726// then function instr_size_for_decode_klass_not_null()3727// needs to get updated.3728// This variant of decode_klass_not_null() must generate predictable code!3729// The code must only depend on globally known parameters.3730void MacroAssembler::decode_klass_not_null(Register dst) {3731address base = CompressedKlassPointers::base();3732int shift = CompressedKlassPointers::shift();3733int beg_off = offset();3734assert(UseCompressedClassPointers, "only for compressed klass ptrs");37353736BLOCK_COMMENT("cKlass decoder (const size) {");37373738if (shift != 0) { // Shift required?3739z_sllg(dst, dst, shift);3740}3741if (base != NULL) {3742unsigned int base_h = ((unsigned long)base)>>32;3743unsigned int base_l = (unsigned int)((unsigned long)base);3744if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {3745z_aih(dst, base_h); // Base has no set bits in lower half.3746} else if ((base_h == 0) && (base_l != 0)) {3747z_algfi(dst, base_l); // Base has no set bits in upper half.3748} else {3749load_const(Z_R0, base); // Base has set bits everywhere.3750z_algr(dst, Z_R0);3751}3752}37533754#ifdef ASSERT3755Label ok;3756z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.3757z_brc(Assembler::bcondAllZero, ok);3758// The plain disassembler does not recognize illtrap. It instead displays3759// a 32-bit value. Issueing two illtraps assures the disassembler finds3760// the proper beginning of the next instruction.3761z_illtrap(0xd1);3762z_illtrap(0xd1);3763bind(ok);3764#endif3765assert(offset() == beg_off + instr_size_for_decode_klass_not_null(), "Code gen mismatch.");37663767BLOCK_COMMENT("} cKlass decoder (const size)");3768}37693770// This variant of decode_klass_not_null() is for cases where3771// 1) the size of the generated instructions may vary3772// 2) the result is (potentially) stored in a register different from the source.3773void MacroAssembler::decode_klass_not_null(Register dst, Register src) {3774address base = CompressedKlassPointers::base();3775int shift = CompressedKlassPointers::shift();3776assert(UseCompressedClassPointers, "only for compressed klass ptrs");37773778BLOCK_COMMENT("cKlass decoder {");37793780if (src == noreg) src = dst;37813782if (shift != 0) { // Shift or at least move required?3783z_sllg(dst, src, shift);3784} else {3785lgr_if_needed(dst, src);3786}37873788if (base != NULL) {3789unsigned int base_h = ((unsigned long)base)>>32;3790unsigned int base_l = (unsigned int)((unsigned long)base);3791if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {3792z_aih(dst, base_h); // Base has not set bits in lower half.3793} else if ((base_h == 0) && (base_l != 0)) {3794z_algfi(dst, base_l); // Base has no set bits in upper half.3795} else {3796load_const_optimized(Z_R0, base); // Base has set bits everywhere.3797z_algr(dst, Z_R0);3798}3799}38003801#ifdef ASSERT3802Label ok;3803z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.3804z_brc(Assembler::bcondAllZero, ok);3805// The plain disassembler does not recognize illtrap. It instead displays3806// a 32-bit value. Issueing two illtraps assures the disassembler finds3807// the proper beginning of the next instruction.3808z_illtrap(0xd2);3809z_illtrap(0xd2);3810bind(ok);3811#endif3812BLOCK_COMMENT("} cKlass decoder");3813}38143815void MacroAssembler::load_klass(Register klass, Address mem) {3816if (UseCompressedClassPointers) {3817z_llgf(klass, mem);3818// Attention: no null check here!3819decode_klass_not_null(klass);3820} else {3821z_lg(klass, mem);3822}3823}38243825void MacroAssembler::load_klass(Register klass, Register src_oop) {3826if (UseCompressedClassPointers) {3827z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop);3828// Attention: no null check here!3829decode_klass_not_null(klass);3830} else {3831z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop);3832}3833}38343835void MacroAssembler::load_prototype_header(Register Rheader, Register Rsrc_oop) {3836assert_different_registers(Rheader, Rsrc_oop);3837load_klass(Rheader, Rsrc_oop);3838z_lg(Rheader, Address(Rheader, Klass::prototype_header_offset()));3839}38403841void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) {3842if (UseCompressedClassPointers) {3843assert_different_registers(dst_oop, klass, Z_R0);3844if (ck == noreg) ck = klass;3845encode_klass_not_null(ck, klass);3846z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes()));3847} else {3848z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes()));3849}3850}38513852void MacroAssembler::store_klass_gap(Register s, Register d) {3853if (UseCompressedClassPointers) {3854assert(s != d, "not enough registers");3855// Support s = noreg.3856if (s != noreg) {3857z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes()));3858} else {3859z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0);3860}3861}3862}38633864// Compare klass ptr in memory against klass ptr in register.3865//3866// Rop1 - klass in register, always uncompressed.3867// disp - Offset of klass in memory, compressed/uncompressed, depending on runtime flag.3868// Rbase - Base address of cKlass in memory.3869// maybeNULL - True if Rop1 possibly is a NULL.3870void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL) {38713872BLOCK_COMMENT("compare klass ptr {");38733874if (UseCompressedClassPointers) {3875const int shift = CompressedKlassPointers::shift();3876address base = CompressedKlassPointers::base();38773878assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift");3879assert_different_registers(Rop1, Z_R0);3880assert_different_registers(Rop1, Rbase, Z_R1);38813882// First encode register oop and then compare with cOop in memory.3883// This sequence saves an unnecessary cOop load and decode.3884if (base == NULL) {3885if (shift == 0) {3886z_cl(Rop1, disp, Rbase); // Unscaled3887} else {3888z_srlg(Z_R0, Rop1, shift); // ZeroBased3889z_cl(Z_R0, disp, Rbase);3890}3891} else { // HeapBased3892#ifdef ASSERT3893bool used_R0 = true;3894bool used_R1 = true;3895#endif3896Register current = Rop1;3897Label done;38983899if (maybeNULL) { // NULL ptr must be preserved!3900z_ltgr(Z_R0, current);3901z_bre(done);3902current = Z_R0;3903}39043905unsigned int base_h = ((unsigned long)base)>>32;3906unsigned int base_l = (unsigned int)((unsigned long)base);3907if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {3908lgr_if_needed(Z_R0, current);3909z_aih(Z_R0, -((int)base_h)); // Base has no set bits in lower half.3910} else if ((base_h == 0) && (base_l != 0)) {3911lgr_if_needed(Z_R0, current);3912z_agfi(Z_R0, -(int)base_l);3913} else {3914int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));3915add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement.3916}39173918if (shift != 0) {3919z_srlg(Z_R0, Z_R0, shift);3920}3921bind(done);3922z_cl(Z_R0, disp, Rbase);3923#ifdef ASSERT3924if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);3925if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);3926#endif3927}3928} else {3929z_clg(Rop1, disp, Z_R0, Rbase);3930}3931BLOCK_COMMENT("} compare klass ptr");3932}39333934//---------------------------3935// Compressed oops3936//---------------------------39373938void MacroAssembler::encode_heap_oop(Register oop) {3939oop_encoder(oop, oop, true /*maybe null*/);3940}39413942void MacroAssembler::encode_heap_oop_not_null(Register oop) {3943oop_encoder(oop, oop, false /*not null*/);3944}39453946// Called with something derived from the oop base. e.g. oop_base>>3.3947int MacroAssembler::get_oop_base_pow2_offset(uint64_t oop_base) {3948unsigned int oop_base_ll = ((unsigned int)(oop_base >> 0)) & 0xffff;3949unsigned int oop_base_lh = ((unsigned int)(oop_base >> 16)) & 0xffff;3950unsigned int oop_base_hl = ((unsigned int)(oop_base >> 32)) & 0xffff;3951unsigned int oop_base_hh = ((unsigned int)(oop_base >> 48)) & 0xffff;3952unsigned int n_notzero_parts = (oop_base_ll == 0 ? 0:1)3953+ (oop_base_lh == 0 ? 0:1)3954+ (oop_base_hl == 0 ? 0:1)3955+ (oop_base_hh == 0 ? 0:1);39563957assert(oop_base != 0, "This is for HeapBased cOops only");39583959if (n_notzero_parts != 1) { // Check if oop_base is just a few pages shy of a power of 2.3960uint64_t pow2_offset = 0x10000 - oop_base_ll;3961if (pow2_offset < 0x8000) { // This might not be necessary.3962uint64_t oop_base2 = oop_base + pow2_offset;39633964oop_base_ll = ((unsigned int)(oop_base2 >> 0)) & 0xffff;3965oop_base_lh = ((unsigned int)(oop_base2 >> 16)) & 0xffff;3966oop_base_hl = ((unsigned int)(oop_base2 >> 32)) & 0xffff;3967oop_base_hh = ((unsigned int)(oop_base2 >> 48)) & 0xffff;3968n_notzero_parts = (oop_base_ll == 0 ? 0:1) +3969(oop_base_lh == 0 ? 0:1) +3970(oop_base_hl == 0 ? 0:1) +3971(oop_base_hh == 0 ? 0:1);3972if (n_notzero_parts == 1) {3973assert(-(int64_t)pow2_offset != (int64_t)-1, "We use -1 to signal uninitialized base register");3974return -pow2_offset;3975}3976}3977}3978return 0;3979}39803981// If base address is offset from a straight power of two by just a few pages,3982// return this offset to the caller for a possible later composite add.3983// TODO/FIX: will only work correctly for 4k pages.3984int MacroAssembler::get_oop_base(Register Rbase, uint64_t oop_base) {3985int pow2_offset = get_oop_base_pow2_offset(oop_base);39863987load_const_optimized(Rbase, oop_base - pow2_offset); // Best job possible.39883989return pow2_offset;3990}39913992int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) {3993int offset = get_oop_base(Rbase, oop_base);3994z_lcgr(Rbase, Rbase);3995return -offset;3996}39973998// Compare compressed oop in memory against oop in register.3999// Rop1 - Oop in register.4000// disp - Offset of cOop in memory.4001// Rbase - Base address of cOop in memory.4002// maybeNULL - True if Rop1 possibly is a NULL.4003// maybeNULLtarget - Branch target for Rop1 == NULL, if flow control shall NOT continue with compare instruction.4004void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL) {4005Register Rbase = mem.baseOrR0();4006Register Rindex = mem.indexOrR0();4007int64_t disp = mem.disp();40084009const int shift = CompressedOops::shift();4010address base = CompressedOops::base();40114012assert(UseCompressedOops, "must be on to call this method");4013assert(Universe::heap() != NULL, "java heap must be initialized to call this method");4014assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");4015assert_different_registers(Rop1, Z_R0);4016assert_different_registers(Rop1, Rbase, Z_R1);4017assert_different_registers(Rop1, Rindex, Z_R1);40184019BLOCK_COMMENT("compare heap oop {");40204021// First encode register oop and then compare with cOop in memory.4022// This sequence saves an unnecessary cOop load and decode.4023if (base == NULL) {4024if (shift == 0) {4025z_cl(Rop1, disp, Rindex, Rbase); // Unscaled4026} else {4027z_srlg(Z_R0, Rop1, shift); // ZeroBased4028z_cl(Z_R0, disp, Rindex, Rbase);4029}4030} else { // HeapBased4031#ifdef ASSERT4032bool used_R0 = true;4033bool used_R1 = true;4034#endif4035Label done;4036int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));40374038if (maybeNULL) { // NULL ptr must be preserved!4039z_ltgr(Z_R0, Rop1);4040z_bre(done);4041}40424043add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1);4044z_srlg(Z_R0, Z_R0, shift);40454046bind(done);4047z_cl(Z_R0, disp, Rindex, Rbase);4048#ifdef ASSERT4049if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);4050if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);4051#endif4052}4053BLOCK_COMMENT("} compare heap oop");4054}40554056void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,4057const Address& addr, Register val,4058Register tmp1, Register tmp2, Register tmp3) {4059assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |4060ON_UNKNOWN_OOP_REF)) == 0, "unsupported decorator");4061BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();4062decorators = AccessInternal::decorator_fixup(decorators);4063bool as_raw = (decorators & AS_RAW) != 0;4064if (as_raw) {4065bs->BarrierSetAssembler::store_at(this, decorators, type,4066addr, val,4067tmp1, tmp2, tmp3);4068} else {4069bs->store_at(this, decorators, type,4070addr, val,4071tmp1, tmp2, tmp3);4072}4073}40744075void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,4076const Address& addr, Register dst,4077Register tmp1, Register tmp2, Label *is_null) {4078assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |4079ON_PHANTOM_OOP_REF | ON_WEAK_OOP_REF)) == 0, "unsupported decorator");4080BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();4081decorators = AccessInternal::decorator_fixup(decorators);4082bool as_raw = (decorators & AS_RAW) != 0;4083if (as_raw) {4084bs->BarrierSetAssembler::load_at(this, decorators, type,4085addr, dst,4086tmp1, tmp2, is_null);4087} else {4088bs->load_at(this, decorators, type,4089addr, dst,4090tmp1, tmp2, is_null);4091}4092}40934094void MacroAssembler::load_heap_oop(Register dest, const Address &a,4095Register tmp1, Register tmp2,4096DecoratorSet decorators, Label *is_null) {4097access_load_at(T_OBJECT, IN_HEAP | decorators, a, dest, tmp1, tmp2, is_null);4098}40994100void MacroAssembler::store_heap_oop(Register Roop, const Address &a,4101Register tmp1, Register tmp2, Register tmp3,4102DecoratorSet decorators) {4103access_store_at(T_OBJECT, IN_HEAP | decorators, a, Roop, tmp1, tmp2, tmp3);4104}41054106//-------------------------------------------------4107// Encode compressed oop. Generally usable encoder.4108//-------------------------------------------------4109// Rsrc - contains regular oop on entry. It remains unchanged.4110// Rdst - contains compressed oop on exit.4111// Rdst and Rsrc may indicate same register, in which case Rsrc does not remain unchanged.4112//4113// Rdst must not indicate scratch register Z_R1 (Z_R1_scratch) for functionality.4114// Rdst should not indicate scratch register Z_R0 (Z_R0_scratch) for performance.4115//4116// only32bitValid is set, if later code only uses the lower 32 bits. In this4117// case we must not fix the upper 32 bits.4118void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,4119Register Rbase, int pow2_offset, bool only32bitValid) {41204121const address oop_base = CompressedOops::base();4122const int oop_shift = CompressedOops::shift();4123const bool disjoint = CompressedOops::base_disjoint();41244125assert(UseCompressedOops, "must be on to call this method");4126assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder");4127assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");41284129if (disjoint || (oop_base == NULL)) {4130BLOCK_COMMENT("cOop encoder zeroBase {");4131if (oop_shift == 0) {4132if (oop_base != NULL && !only32bitValid) {4133z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again.4134} else {4135lgr_if_needed(Rdst, Rsrc);4136}4137} else {4138z_srlg(Rdst, Rsrc, oop_shift);4139if (oop_base != NULL && !only32bitValid) {4140z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.4141}4142}4143BLOCK_COMMENT("} cOop encoder zeroBase");4144return;4145}41464147bool used_R0 = false;4148bool used_R1 = false;41494150BLOCK_COMMENT("cOop encoder general {");4151assert_different_registers(Rdst, Z_R1);4152assert_different_registers(Rsrc, Rbase);4153if (maybeNULL) {4154Label done;4155// We reorder shifting and subtracting, so that we can compare4156// and shift in parallel:4157//4158// cycle 0: potential LoadN, base = <const>4159// cycle 1: base = !base dst = src >> 3, cmp cr = (src != 0)4160// cycle 2: if (cr) br, dst = dst + base + offset41614162// Get oop_base components.4163if (pow2_offset == -1) {4164if (Rdst == Rbase) {4165if (Rdst == Z_R1 || Rsrc == Z_R1) {4166Rbase = Z_R0;4167used_R0 = true;4168} else {4169Rdst = Z_R1;4170used_R1 = true;4171}4172}4173if (Rbase == Z_R1) {4174used_R1 = true;4175}4176pow2_offset = get_oop_base_complement(Rbase, ((uint64_t)(intptr_t)oop_base) >> oop_shift);4177}4178assert_different_registers(Rdst, Rbase);41794180// Check for NULL oop (must be left alone) and shift.4181if (oop_shift != 0) { // Shift out alignment bits4182if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set.4183z_srag(Rdst, Rsrc, oop_shift); // Arithmetic shift sets the condition code.4184} else {4185z_srlg(Rdst, Rsrc, oop_shift);4186z_ltgr(Rsrc, Rsrc); // This is the recommended way of testing for zero.4187// This probably is faster, as it does not write a register. No!4188// z_cghi(Rsrc, 0);4189}4190} else {4191z_ltgr(Rdst, Rsrc); // Move NULL to result register.4192}4193z_bre(done);41944195// Subtract oop_base components.4196if ((Rdst == Z_R0) || (Rbase == Z_R0)) {4197z_algr(Rdst, Rbase);4198if (pow2_offset != 0) { add2reg(Rdst, pow2_offset); }4199} else {4200add2reg_with_index(Rdst, pow2_offset, Rbase, Rdst);4201}4202if (!only32bitValid) {4203z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.4204}4205bind(done);42064207} else { // not null4208// Get oop_base components.4209if (pow2_offset == -1) {4210pow2_offset = get_oop_base_complement(Rbase, (uint64_t)(intptr_t)oop_base);4211}42124213// Subtract oop_base components and shift.4214if (Rdst == Z_R0 || Rsrc == Z_R0 || Rbase == Z_R0) {4215// Don't use lay instruction.4216if (Rdst == Rsrc) {4217z_algr(Rdst, Rbase);4218} else {4219lgr_if_needed(Rdst, Rbase);4220z_algr(Rdst, Rsrc);4221}4222if (pow2_offset != 0) add2reg(Rdst, pow2_offset);4223} else {4224add2reg_with_index(Rdst, pow2_offset, Rbase, Rsrc);4225}4226if (oop_shift != 0) { // Shift out alignment bits.4227z_srlg(Rdst, Rdst, oop_shift);4228}4229if (!only32bitValid) {4230z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.4231}4232}4233#ifdef ASSERT4234if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb01bUL, 2); }4235if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb02bUL, 2); }4236#endif4237BLOCK_COMMENT("} cOop encoder general");4238}42394240//-------------------------------------------------4241// decode compressed oop. Generally usable decoder.4242//-------------------------------------------------4243// Rsrc - contains compressed oop on entry.4244// Rdst - contains regular oop on exit.4245// Rdst and Rsrc may indicate same register.4246// Rdst must not be the same register as Rbase, if Rbase was preloaded (before call).4247// Rdst can be the same register as Rbase. Then, either Z_R0 or Z_R1 must be available as scratch.4248// Rbase - register to use for the base4249// pow2_offset - offset of base to nice value. If -1, base must be loaded.4250// For performance, it is good to4251// - avoid Z_R0 for any of the argument registers.4252// - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance.4253// - avoid Z_R1 for Rdst if Rdst == Rbase.4254void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) {42554256const address oop_base = CompressedOops::base();4257const int oop_shift = CompressedOops::shift();4258const bool disjoint = CompressedOops::base_disjoint();42594260assert(UseCompressedOops, "must be on to call this method");4261assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder");4262assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes),4263"cOop encoder detected bad shift");42644265// cOops are always loaded zero-extended from memory. No explicit zero-extension necessary.42664267if (oop_base != NULL) {4268unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff;4269unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff;4270unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff;4271if (disjoint && (oop_base_hl == 0 || oop_base_hh == 0)) {4272BLOCK_COMMENT("cOop decoder disjointBase {");4273// We do not need to load the base. Instead, we can install the upper bits4274// with an OR instead of an ADD.4275Label done;42764277// Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.4278if (maybeNULL) { // NULL ptr must be preserved!4279z_slag(Rdst, Rsrc, oop_shift); // Arithmetic shift sets the condition code.4280z_bre(done);4281} else {4282z_sllg(Rdst, Rsrc, oop_shift); // Logical shift leaves condition code alone.4283}4284if ((oop_base_hl != 0) && (oop_base_hh != 0)) {4285z_oihf(Rdst, oop_base_hf);4286} else if (oop_base_hl != 0) {4287z_oihl(Rdst, oop_base_hl);4288} else {4289assert(oop_base_hh != 0, "not heapbased mode");4290z_oihh(Rdst, oop_base_hh);4291}4292bind(done);4293BLOCK_COMMENT("} cOop decoder disjointBase");4294} else {4295BLOCK_COMMENT("cOop decoder general {");4296// There are three decode steps:4297// scale oop offset (shift left)4298// get base (in reg) and pow2_offset (constant)4299// add base, pow2_offset, and oop offset4300// The following register overlap situations may exist:4301// Rdst == Rsrc, Rbase any other4302// not a problem. Scaling in-place leaves Rbase undisturbed.4303// Loading Rbase does not impact the scaled offset.4304// Rdst == Rbase, Rsrc any other4305// scaling would destroy a possibly preloaded Rbase. Loading Rbase4306// would destroy the scaled offset.4307// Remedy: use Rdst_tmp if Rbase has been preloaded.4308// use Rbase_tmp if base has to be loaded.4309// Rsrc == Rbase, Rdst any other4310// Only possible without preloaded Rbase.4311// Loading Rbase does not destroy compressed oop because it was scaled into Rdst before.4312// Rsrc == Rbase, Rdst == Rbase4313// Only possible without preloaded Rbase.4314// Loading Rbase would destroy compressed oop. Scaling in-place is ok.4315// Remedy: use Rbase_tmp.4316//4317Label done;4318Register Rdst_tmp = Rdst;4319Register Rbase_tmp = Rbase;4320bool used_R0 = false;4321bool used_R1 = false;4322bool base_preloaded = pow2_offset >= 0;4323guarantee(!(base_preloaded && (Rsrc == Rbase)), "Register clash, check caller");4324assert(oop_shift != 0, "room for optimization");43254326// Check if we need to use scratch registers.4327if (Rdst == Rbase) {4328assert(!(((Rdst == Z_R0) && (Rsrc == Z_R1)) || ((Rdst == Z_R1) && (Rsrc == Z_R0))), "need a scratch reg");4329if (Rdst != Rsrc) {4330if (base_preloaded) { Rdst_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; }4331else { Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; }4332} else {4333Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1;4334}4335}4336if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase);43374338// Scale oop and check for NULL.4339// Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.4340if (maybeNULL) { // NULL ptr must be preserved!4341z_slag(Rdst_tmp, Rsrc, oop_shift); // Arithmetic shift sets the condition code.4342z_bre(done);4343} else {4344z_sllg(Rdst_tmp, Rsrc, oop_shift); // Logical shift leaves condition code alone.4345}43464347// Get oop_base components.4348if (!base_preloaded) {4349pow2_offset = get_oop_base(Rbase_tmp, (uint64_t)(intptr_t)oop_base);4350}43514352// Add up all components.4353if ((Rbase_tmp == Z_R0) || (Rdst_tmp == Z_R0)) {4354z_algr(Rdst_tmp, Rbase_tmp);4355if (pow2_offset != 0) { add2reg(Rdst_tmp, pow2_offset); }4356} else {4357add2reg_with_index(Rdst_tmp, pow2_offset, Rbase_tmp, Rdst_tmp);4358}43594360bind(done);4361lgr_if_needed(Rdst, Rdst_tmp);4362#ifdef ASSERT4363if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb03bUL, 2); }4364if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb04bUL, 2); }4365#endif4366BLOCK_COMMENT("} cOop decoder general");4367}4368} else {4369BLOCK_COMMENT("cOop decoder zeroBase {");4370if (oop_shift == 0) {4371lgr_if_needed(Rdst, Rsrc);4372} else {4373z_sllg(Rdst, Rsrc, oop_shift);4374}4375BLOCK_COMMENT("} cOop decoder zeroBase");4376}4377}43784379// ((OopHandle)result).resolve();4380void MacroAssembler::resolve_oop_handle(Register result) {4381// OopHandle::resolve is an indirection.4382z_lg(result, 0, result);4383}43844385void MacroAssembler::load_mirror_from_const_method(Register mirror, Register const_method) {4386mem2reg_opt(mirror, Address(const_method, ConstMethod::constants_offset()));4387mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));4388mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset()));4389resolve_oop_handle(mirror);4390}43914392void MacroAssembler::load_method_holder(Register holder, Register method) {4393mem2reg_opt(holder, Address(method, Method::const_offset()));4394mem2reg_opt(holder, Address(holder, ConstMethod::constants_offset()));4395mem2reg_opt(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes()));4396}43974398//---------------------------------------------------------------4399//--- Operations on arrays.4400//---------------------------------------------------------------44014402// Compiler ensures base is doubleword aligned and cnt is #doublewords.4403// Emitter does not KILL cnt and base arguments, since they need to be copied to4404// work registers anyway.4405// Actually, only r0, r1, and r5 are killed.4406unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register odd_tmp_reg) {44074408int block_start = offset();4409Register dst_len = Z_R1; // Holds dst len for MVCLE.4410Register dst_addr = Z_R0; // Holds dst addr for MVCLE.44114412Label doXC, doMVCLE, done;44134414BLOCK_COMMENT("Clear_Array {");44154416// Check for zero len and convert to long.4417z_ltgfr(odd_tmp_reg, cnt_arg);4418z_bre(done); // Nothing to do if len == 0.44194420// Prefetch data to be cleared.4421if (VM_Version::has_Prefetch()) {4422z_pfd(0x02, 0, Z_R0, base_pointer_arg);4423z_pfd(0x02, 256, Z_R0, base_pointer_arg);4424}44254426z_sllg(dst_len, odd_tmp_reg, 3); // #bytes to clear.4427z_cghi(odd_tmp_reg, 32); // Check for len <= 256 bytes (<=32 DW).4428z_brnh(doXC); // If so, use executed XC to clear.44294430// MVCLE: initialize long arrays (general case).4431bind(doMVCLE);4432z_lgr(dst_addr, base_pointer_arg);4433// Pass 0 as source length to MVCLE: destination will be filled with padding byte 0.4434// The even register of the register pair is not killed.4435clear_reg(odd_tmp_reg, true, false);4436MacroAssembler::move_long_ext(dst_addr, as_Register(odd_tmp_reg->encoding()-1), 0);4437z_bru(done);44384439// XC: initialize short arrays.4440Label XC_template; // Instr template, never exec directly!4441bind(XC_template);4442z_xc(0,0,base_pointer_arg,0,base_pointer_arg);44434444bind(doXC);4445add2reg(dst_len, -1); // Get #bytes-1 for EXECUTE.4446if (VM_Version::has_ExecuteExtensions()) {4447z_exrl(dst_len, XC_template); // Execute XC with var. len.4448} else {4449z_larl(odd_tmp_reg, XC_template);4450z_ex(dst_len,0,Z_R0,odd_tmp_reg); // Execute XC with var. len.4451}4452// z_bru(done); // fallthru44534454bind(done);44554456BLOCK_COMMENT("} Clear_Array");44574458int block_end = offset();4459return block_end - block_start;4460}44614462// Compiler ensures base is doubleword aligned and cnt is count of doublewords.4463// Emitter does not KILL any arguments nor work registers.4464// Emitter generates up to 16 XC instructions, depending on the array length.4465unsigned int MacroAssembler::Clear_Array_Const(long cnt, Register base) {4466int block_start = offset();4467int off;4468int lineSize_Bytes = AllocatePrefetchStepSize;4469int lineSize_DW = AllocatePrefetchStepSize>>LogBytesPerWord;4470bool doPrefetch = VM_Version::has_Prefetch();4471int XC_maxlen = 256;4472int numXCInstr = cnt > 0 ? (cnt*BytesPerWord-1)/XC_maxlen+1 : 0;44734474BLOCK_COMMENT("Clear_Array_Const {");4475assert(cnt*BytesPerWord <= 4096, "ClearArrayConst can handle 4k only");44764477// Do less prefetching for very short arrays.4478if (numXCInstr > 0) {4479// Prefetch only some cache lines, then begin clearing.4480if (doPrefetch) {4481if (cnt*BytesPerWord <= lineSize_Bytes/4) { // If less than 1/4 of a cache line to clear,4482z_pfd(0x02, 0, Z_R0, base); // prefetch just the first cache line.4483} else {4484assert(XC_maxlen == lineSize_Bytes, "ClearArrayConst needs 256B cache lines");4485for (off = 0; (off < AllocatePrefetchLines) && (off <= numXCInstr); off ++) {4486z_pfd(0x02, off*lineSize_Bytes, Z_R0, base);4487}4488}4489}44904491for (off=0; off<(numXCInstr-1); off++) {4492z_xc(off*XC_maxlen, XC_maxlen-1, base, off*XC_maxlen, base);44934494// Prefetch some cache lines in advance.4495if (doPrefetch && (off <= numXCInstr-AllocatePrefetchLines)) {4496z_pfd(0x02, (off+AllocatePrefetchLines)*lineSize_Bytes, Z_R0, base);4497}4498}4499if (off*XC_maxlen < cnt*BytesPerWord) {4500z_xc(off*XC_maxlen, (cnt*BytesPerWord-off*XC_maxlen)-1, base, off*XC_maxlen, base);4501}4502}4503BLOCK_COMMENT("} Clear_Array_Const");45044505int block_end = offset();4506return block_end - block_start;4507}45084509// Compiler ensures base is doubleword aligned and cnt is #doublewords.4510// Emitter does not KILL cnt and base arguments, since they need to be copied to4511// work registers anyway.4512// Actually, only r0, r1, (which are work registers) and odd_tmp_reg are killed.4513//4514// For very large arrays, exploit MVCLE H/W support.4515// MVCLE instruction automatically exploits H/W-optimized page mover.4516// - Bytes up to next page boundary are cleared with a series of XC to self.4517// - All full pages are cleared with the page mover H/W assist.4518// - Remaining bytes are again cleared by a series of XC to self.4519//4520unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register odd_tmp_reg) {45214522int block_start = offset();4523Register dst_len = Z_R1; // Holds dst len for MVCLE.4524Register dst_addr = Z_R0; // Holds dst addr for MVCLE.45254526BLOCK_COMMENT("Clear_Array_Const_Big {");45274528// Get len to clear.4529load_const_optimized(dst_len, (long)cnt*8L); // in Bytes = #DW*845304531// Prepare other args to MVCLE.4532z_lgr(dst_addr, base_pointer_arg);4533// Pass 0 as source length to MVCLE: destination will be filled with padding byte 0.4534// The even register of the register pair is not killed.4535(void) clear_reg(odd_tmp_reg, true, false); // Src len of MVCLE is zero.4536MacroAssembler::move_long_ext(dst_addr, as_Register(odd_tmp_reg->encoding() - 1), 0);4537BLOCK_COMMENT("} Clear_Array_Const_Big");45384539int block_end = offset();4540return block_end - block_start;4541}45424543// Allocator.4544unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,4545Register cnt_reg,4546Register tmp1_reg, Register tmp2_reg) {4547// Tmp1 is oddReg.4548// Tmp2 is evenReg.45494550int block_start = offset();4551Label doMVC, doMVCLE, done, MVC_template;45524553BLOCK_COMMENT("CopyRawMemory_AlignedDisjoint {");45544555// Check for zero len and convert to long.4556z_ltgfr(cnt_reg, cnt_reg); // Remember casted value for doSTG case.4557z_bre(done); // Nothing to do if len == 0.45584559z_sllg(Z_R1, cnt_reg, 3); // Dst len in bytes. calc early to have the result ready.45604561z_cghi(cnt_reg, 32); // Check for len <= 256 bytes (<=32 DW).4562z_brnh(doMVC); // If so, use executed MVC to clear.45634564bind(doMVCLE); // A lot of data (more than 256 bytes).4565// Prep dest reg pair.4566z_lgr(Z_R0, dst_reg); // dst addr4567// Dst len already in Z_R1.4568// Prep src reg pair.4569z_lgr(tmp2_reg, src_reg); // src addr4570z_lgr(tmp1_reg, Z_R1); // Src len same as dst len.45714572// Do the copy.4573move_long_ext(Z_R0, tmp2_reg, 0xb0); // Bypass cache.4574z_bru(done); // All done.45754576bind(MVC_template); // Just some data (not more than 256 bytes).4577z_mvc(0, 0, dst_reg, 0, src_reg);45784579bind(doMVC);45804581if (VM_Version::has_ExecuteExtensions()) {4582add2reg(Z_R1, -1);4583} else {4584add2reg(tmp1_reg, -1, Z_R1);4585z_larl(Z_R1, MVC_template);4586}45874588if (VM_Version::has_Prefetch()) {4589z_pfd(1, 0,Z_R0,src_reg);4590z_pfd(2, 0,Z_R0,dst_reg);4591// z_pfd(1,256,Z_R0,src_reg); // Assume very short copy.4592// z_pfd(2,256,Z_R0,dst_reg);4593}45944595if (VM_Version::has_ExecuteExtensions()) {4596z_exrl(Z_R1, MVC_template);4597} else {4598z_ex(tmp1_reg, 0, Z_R0, Z_R1);4599}46004601bind(done);46024603BLOCK_COMMENT("} CopyRawMemory_AlignedDisjoint");46044605int block_end = offset();4606return block_end - block_start;4607}46084609//-------------------------------------------------4610// Constants (scalar and oop) in constant pool4611//-------------------------------------------------46124613// Add a non-relocated constant to the CP.4614int MacroAssembler::store_const_in_toc(AddressLiteral& val) {4615long value = val.value();4616address tocPos = long_constant(value);46174618if (tocPos != NULL) {4619int tocOffset = (int)(tocPos - code()->consts()->start());4620return tocOffset;4621}4622// Address_constant returned NULL, so no constant entry has been created.4623// In that case, we return a "fatal" offset, just in case that subsequently4624// generated access code is executed.4625return -1;4626}46274628// Returns the TOC offset where the address is stored.4629// Add a relocated constant to the CP.4630int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) {4631// Use RelocationHolder::none for the constant pool entry.4632// Otherwise we will end up with a failing NativeCall::verify(x),4633// where x is the address of the constant pool entry.4634address tocPos = address_constant((address)oop.value(), RelocationHolder::none);46354636if (tocPos != NULL) {4637int tocOffset = (int)(tocPos - code()->consts()->start());4638RelocationHolder rsp = oop.rspec();4639Relocation *rel = rsp.reloc();46404641// Store toc_offset in relocation, used by call_far_patchable.4642if ((relocInfo::relocType)rel->type() == relocInfo::runtime_call_w_cp_type) {4643((runtime_call_w_cp_Relocation *)(rel))->set_constant_pool_offset(tocOffset);4644}4645// Relocate at the load's pc.4646relocate(rsp);46474648return tocOffset;4649}4650// Address_constant returned NULL, so no constant entry has been created4651// in that case, we return a "fatal" offset, just in case that subsequently4652// generated access code is executed.4653return -1;4654}46554656bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {4657int tocOffset = store_const_in_toc(a);4658if (tocOffset == -1) return false;4659address tocPos = tocOffset + code()->consts()->start();4660assert((address)code()->consts()->start() != NULL, "Please add CP address");4661relocate(a.rspec());4662load_long_pcrelative(dst, tocPos);4663return true;4664}46654666bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {4667int tocOffset = store_oop_in_toc(a);4668if (tocOffset == -1) return false;4669address tocPos = tocOffset + code()->consts()->start();4670assert((address)code()->consts()->start() != NULL, "Please add CP address");46714672load_addr_pcrelative(dst, tocPos);4673return true;4674}46754676// If the instruction sequence at the given pc is a load_const_from_toc4677// sequence, return the value currently stored at the referenced position4678// in the TOC.4679intptr_t MacroAssembler::get_const_from_toc(address pc) {46804681assert(is_load_const_from_toc(pc), "must be load_const_from_pool");46824683long offset = get_load_const_from_toc_offset(pc);4684address dataLoc = NULL;4685if (is_load_const_from_toc_pcrelative(pc)) {4686dataLoc = pc + offset;4687} else {4688CodeBlob* cb = CodeCache::find_blob_unsafe(pc); // Else we get assertion if nmethod is zombie.4689assert(cb && cb->is_nmethod(), "sanity");4690nmethod* nm = (nmethod*)cb;4691dataLoc = nm->ctable_begin() + offset;4692}4693return *(intptr_t *)dataLoc;4694}46954696// If the instruction sequence at the given pc is a load_const_from_toc4697// sequence, copy the passed-in new_data value into the referenced4698// position in the TOC.4699void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb) {4700assert(is_load_const_from_toc(pc), "must be load_const_from_pool");47014702long offset = MacroAssembler::get_load_const_from_toc_offset(pc);4703address dataLoc = NULL;4704if (is_load_const_from_toc_pcrelative(pc)) {4705dataLoc = pc+offset;4706} else {4707nmethod* nm = CodeCache::find_nmethod(pc);4708assert((cb == NULL) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob");4709dataLoc = nm->ctable_begin() + offset;4710}4711if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary.4712*(unsigned long *)dataLoc = new_data;4713}4714}47154716// Dynamic TOC. Getter must only be called if "a" is a load_const_from_toc4717// site. Verify by calling is_load_const_from_toc() before!!4718// Offset is +/- 2**32 -> use long.4719long MacroAssembler::get_load_const_from_toc_offset(address a) {4720assert(is_load_const_from_toc_pcrelative(a), "expected pc relative load");4721// expected code sequence:4722// z_lgrl(t, simm32); len = 64723unsigned long inst;4724unsigned int len = get_instruction(a, &inst);4725return get_pcrel_offset(inst);4726}47274728//**********************************************************************************4729// inspection of generated instruction sequences for a particular pattern4730//**********************************************************************************47314732bool MacroAssembler::is_load_const_from_toc_pcrelative(address a) {4733#ifdef ASSERT4734unsigned long inst;4735unsigned int len = get_instruction(a+2, &inst);4736if ((len == 6) && is_load_pcrelative_long(a) && is_call_pcrelative_long(inst)) {4737const int range = 128;4738Assembler::dump_code_range(tty, a, range, "instr(a) == z_lgrl && instr(a+2) == z_brasl");4739VM_Version::z_SIGSEGV();4740}4741#endif4742// expected code sequence:4743// z_lgrl(t, relAddr32); len = 64744//TODO: verify accessed data is in CP, if possible.4745return is_load_pcrelative_long(a); // TODO: might be too general. Currently, only lgrl is used.4746}47474748bool MacroAssembler::is_load_const_from_toc_call(address a) {4749return is_load_const_from_toc(a) && is_call_byregister(a + load_const_from_toc_size());4750}47514752bool MacroAssembler::is_load_const_call(address a) {4753return is_load_const(a) && is_call_byregister(a + load_const_size());4754}47554756//-------------------------------------------------4757// Emitters for some really CICS instructions4758//-------------------------------------------------47594760void MacroAssembler::move_long_ext(Register dst, Register src, unsigned int pad) {4761assert(dst->encoding()%2==0, "must be an even/odd register pair");4762assert(src->encoding()%2==0, "must be an even/odd register pair");4763assert(pad<256, "must be a padding BYTE");47644765Label retry;4766bind(retry);4767Assembler::z_mvcle(dst, src, pad);4768Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);4769}47704771void MacroAssembler::compare_long_ext(Register left, Register right, unsigned int pad) {4772assert(left->encoding() % 2 == 0, "must be an even/odd register pair");4773assert(right->encoding() % 2 == 0, "must be an even/odd register pair");4774assert(pad<256, "must be a padding BYTE");47754776Label retry;4777bind(retry);4778Assembler::z_clcle(left, right, pad, Z_R0);4779Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);4780}47814782void MacroAssembler::compare_long_uni(Register left, Register right, unsigned int pad) {4783assert(left->encoding() % 2 == 0, "must be an even/odd register pair");4784assert(right->encoding() % 2 == 0, "must be an even/odd register pair");4785assert(pad<=0xfff, "must be a padding HALFWORD");4786assert(VM_Version::has_ETF2(), "instruction must be available");47874788Label retry;4789bind(retry);4790Assembler::z_clclu(left, right, pad, Z_R0);4791Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);4792}47934794void MacroAssembler::search_string(Register end, Register start) {4795assert(end->encoding() != 0, "end address must not be in R0");4796assert(start->encoding() != 0, "start address must not be in R0");47974798Label retry;4799bind(retry);4800Assembler::z_srst(end, start);4801Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);4802}48034804void MacroAssembler::search_string_uni(Register end, Register start) {4805assert(end->encoding() != 0, "end address must not be in R0");4806assert(start->encoding() != 0, "start address must not be in R0");4807assert(VM_Version::has_ETF3(), "instruction must be available");48084809Label retry;4810bind(retry);4811Assembler::z_srstu(end, start);4812Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);4813}48144815void MacroAssembler::kmac(Register srcBuff) {4816assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0");4817assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");48184819Label retry;4820bind(retry);4821Assembler::z_kmac(Z_R0, srcBuff);4822Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);4823}48244825void MacroAssembler::kimd(Register srcBuff) {4826assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0");4827assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");48284829Label retry;4830bind(retry);4831Assembler::z_kimd(Z_R0, srcBuff);4832Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);4833}48344835void MacroAssembler::klmd(Register srcBuff) {4836assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0");4837assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");48384839Label retry;4840bind(retry);4841Assembler::z_klmd(Z_R0, srcBuff);4842Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);4843}48444845void MacroAssembler::km(Register dstBuff, Register srcBuff) {4846// DstBuff and srcBuff are allowed to be the same register (encryption in-place).4847// DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.4848assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0");4849assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");4850assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");48514852Label retry;4853bind(retry);4854Assembler::z_km(dstBuff, srcBuff);4855Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);4856}48574858void MacroAssembler::kmc(Register dstBuff, Register srcBuff) {4859// DstBuff and srcBuff are allowed to be the same register (encryption in-place).4860// DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.4861assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0");4862assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");4863assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");48644865Label retry;4866bind(retry);4867Assembler::z_kmc(dstBuff, srcBuff);4868Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);4869}48704871void MacroAssembler::cksm(Register crcBuff, Register srcBuff) {4872assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");48734874Label retry;4875bind(retry);4876Assembler::z_cksm(crcBuff, srcBuff);4877Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);4878}48794880void MacroAssembler::translate_oo(Register r1, Register r2, uint m3) {4881assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");4882assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");48834884Label retry;4885bind(retry);4886Assembler::z_troo(r1, r2, m3);4887Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);4888}48894890void MacroAssembler::translate_ot(Register r1, Register r2, uint m3) {4891assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");4892assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");48934894Label retry;4895bind(retry);4896Assembler::z_trot(r1, r2, m3);4897Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);4898}48994900void MacroAssembler::translate_to(Register r1, Register r2, uint m3) {4901assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");4902assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");49034904Label retry;4905bind(retry);4906Assembler::z_trto(r1, r2, m3);4907Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);4908}49094910void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) {4911assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");4912assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");49134914Label retry;4915bind(retry);4916Assembler::z_trtt(r1, r2, m3);4917Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);4918}49194920//---------------------------------------4921// Helpers for Intrinsic Emitters4922//---------------------------------------49234924/**4925* uint32_t crc;4926* timesXtoThe32[crc & 0xFF] ^ (crc >> 8);4927*/4928void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) {4929assert_different_registers(crc, table, tmp);4930assert_different_registers(val, table);4931if (crc == val) { // Must rotate first to use the unmodified value.4932rotate_then_insert(tmp, val, 56-2, 63-2, 2, true); // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.4933z_srl(crc, 8); // Unsigned shift, clear leftmost 8 bits.4934} else {4935z_srl(crc, 8); // Unsigned shift, clear leftmost 8 bits.4936rotate_then_insert(tmp, val, 56-2, 63-2, 2, true); // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.4937}4938z_x(crc, Address(table, tmp, 0));4939}49404941/**4942* uint32_t crc;4943* timesXtoThe32[crc & 0xFF] ^ (crc >> 8);4944*/4945void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {4946fold_byte_crc32(crc, crc, table, tmp);4947}49484949/**4950* Emits code to update CRC-32 with a byte value according to constants in table.4951*4952* @param [in,out]crc Register containing the crc.4953* @param [in]val Register containing the byte to fold into the CRC.4954* @param [in]table Register containing the table of crc constants.4955*4956* uint32_t crc;4957* val = crc_table[(val ^ crc) & 0xFF];4958* crc = val ^ (crc >> 8);4959*/4960void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {4961z_xr(val, crc);4962fold_byte_crc32(crc, val, table, val);4963}496449654966/**4967* @param crc register containing existing CRC (32-bit)4968* @param buf register pointing to input byte buffer (byte*)4969* @param len register containing number of bytes4970* @param table register pointing to CRC table4971*/4972void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) {4973assert_different_registers(crc, buf, len, table, data);49744975Label L_mainLoop, L_done;4976const int mainLoop_stepping = 1;49774978// Process all bytes in a single-byte loop.4979z_ltr(len, len);4980z_brnh(L_done);49814982bind(L_mainLoop);4983z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register.4984add2reg(buf, mainLoop_stepping); // Advance buffer position.4985update_byte_crc32(crc, data, table);4986z_brct(len, L_mainLoop); // Iterate.49874988bind(L_done);4989}49904991/**4992* Emits code to update CRC-32 with a 4-byte value according to constants in table.4993* Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c.4994*4995*/4996void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,4997Register t0, Register t1, Register t2, Register t3) {4998// This is what we implement (the DOBIG4 part):4999//5000// #define DOBIG4 c ^= *++buf4; \5001// c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \5002// crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]5003// #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG45004// Pre-calculate (constant) column offsets, use columns 4..7 for big-endian.5005const int ix0 = 4*(4*CRC32_COLUMN_SIZE);5006const int ix1 = 5*(4*CRC32_COLUMN_SIZE);5007const int ix2 = 6*(4*CRC32_COLUMN_SIZE);5008const int ix3 = 7*(4*CRC32_COLUMN_SIZE);50095010// XOR crc with next four bytes of buffer.5011lgr_if_needed(t0, crc);5012z_x(t0, Address(buf, bufDisp));5013if (bufInc != 0) {5014add2reg(buf, bufInc);5015}50165017// Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices.5018rotate_then_insert(t3, t0, 56-2, 63-2, 2, true); // ((c >> 0) & 0xff) << 25019rotate_then_insert(t2, t0, 56-2, 63-2, 2-8, true); // ((c >> 8) & 0xff) << 25020rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true); // ((c >> 16) & 0xff) << 25021rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true); // ((c >> 24) & 0xff) << 250225023// XOR indexed table values to calculate updated crc.5024z_ly(t2, Address(table, t2, (intptr_t)ix1));5025z_ly(t0, Address(table, t0, (intptr_t)ix3));5026z_xy(t2, Address(table, t3, (intptr_t)ix0));5027z_xy(t0, Address(table, t1, (intptr_t)ix2));5028z_xr(t0, t2); // Now t0 contains the updated CRC value.5029lgr_if_needed(crc, t0);5030}50315032/**5033* @param crc register containing existing CRC (32-bit)5034* @param buf register pointing to input byte buffer (byte*)5035* @param len register containing number of bytes5036* @param table register pointing to CRC table5037*5038* uses Z_R10..Z_R13 as work register. Must be saved/restored by caller!5039*/5040void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,5041Register t0, Register t1, Register t2, Register t3,5042bool invertCRC) {5043assert_different_registers(crc, buf, len, table);50445045Label L_mainLoop, L_tail;5046Register data = t0;5047Register ctr = Z_R0;5048const int mainLoop_stepping = 4;5049const int log_stepping = exact_log2(mainLoop_stepping);50505051// Don't test for len <= 0 here. This pathological case should not occur anyway.5052// Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.5053// The situation itself is detected and handled correctly by the conditional branches5054// following aghi(len, -stepping) and aghi(len, +stepping).50555056if (invertCRC) {5057not_(crc, noreg, false); // 1s complement of crc5058}50595060// Check for short (<4 bytes) buffer.5061z_srag(ctr, len, log_stepping);5062z_brnh(L_tail);50635064z_lrvr(crc, crc); // Revert byte order because we are dealing with big-endian data.5065rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop50665067BIND(L_mainLoop);5068update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);5069z_brct(ctr, L_mainLoop); // Iterate.50705071z_lrvr(crc, crc); // Revert byte order back to original.50725073// Process last few (<8) bytes of buffer.5074BIND(L_tail);5075update_byteLoop_crc32(crc, buf, len, table, data);50765077if (invertCRC) {5078not_(crc, noreg, false); // 1s complement of crc5079}5080}50815082/**5083* @param crc register containing existing CRC (32-bit)5084* @param buf register pointing to input byte buffer (byte*)5085* @param len register containing number of bytes5086* @param table register pointing to CRC table5087*/5088void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,5089Register t0, Register t1, Register t2, Register t3,5090bool invertCRC) {5091assert_different_registers(crc, buf, len, table);5092Register data = t0;50935094if (invertCRC) {5095not_(crc, noreg, false); // 1s complement of crc5096}50975098update_byteLoop_crc32(crc, buf, len, table, data);50995100if (invertCRC) {5101not_(crc, noreg, false); // 1s complement of crc5102}5103}51045105void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,5106bool invertCRC) {5107assert_different_registers(crc, buf, len, table, tmp);51085109if (invertCRC) {5110not_(crc, noreg, false); // 1s complement of crc5111}51125113z_llgc(tmp, Address(buf, (intptr_t)0)); // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.5114update_byte_crc32(crc, tmp, table);51155116if (invertCRC) {5117not_(crc, noreg, false); // 1s complement of crc5118}5119}51205121void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table,5122bool invertCRC) {5123assert_different_registers(crc, val, table);51245125if (invertCRC) {5126not_(crc, noreg, false); // 1s complement of crc5127}51285129update_byte_crc32(crc, val, table);51305131if (invertCRC) {5132not_(crc, noreg, false); // 1s complement of crc5133}5134}51355136//5137// Code for BigInteger::multiplyToLen() intrinsic.5138//51395140// dest_lo += src1 + src25141// dest_hi += carry1 + carry25142// Z_R7 is destroyed !5143void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo,5144Register src1, Register src2) {5145clear_reg(Z_R7);5146z_algr(dest_lo, src1);5147z_alcgr(dest_hi, Z_R7);5148z_algr(dest_lo, src2);5149z_alcgr(dest_hi, Z_R7);5150}51515152// Multiply 64 bit by 64 bit first loop.5153void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart,5154Register x_xstart,5155Register y, Register y_idx,5156Register z,5157Register carry,5158Register product,5159Register idx, Register kdx) {5160// jlong carry, x[], y[], z[];5161// for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {5162// huge_128 product = y[idx] * x[xstart] + carry;5163// z[kdx] = (jlong)product;5164// carry = (jlong)(product >>> 64);5165// }5166// z[xstart] = carry;51675168Label L_first_loop, L_first_loop_exit;5169Label L_one_x, L_one_y, L_multiply;51705171z_aghi(xstart, -1);5172z_brl(L_one_x); // Special case: length of x is 1.51735174// Load next two integers of x.5175z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);5176mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));517751785179bind(L_first_loop);51805181z_aghi(idx, -1);5182z_brl(L_first_loop_exit);5183z_aghi(idx, -1);5184z_brl(L_one_y);51855186// Load next two integers of y.5187z_sllg(Z_R1_scratch, idx, LogBytesPerInt);5188mem2reg_opt(y_idx, Address(y, Z_R1_scratch, 0));518951905191bind(L_multiply);51925193Register multiplicand = product->successor();5194Register product_low = multiplicand;51955196lgr_if_needed(multiplicand, x_xstart);5197z_mlgr(product, y_idx); // multiplicand * y_idx -> product::multiplicand5198clear_reg(Z_R7);5199z_algr(product_low, carry); // Add carry to result.5200z_alcgr(product, Z_R7); // Add carry of the last addition.5201add2reg(kdx, -2);52025203// Store result.5204z_sllg(Z_R7, kdx, LogBytesPerInt);5205reg2mem_opt(product_low, Address(z, Z_R7, 0));5206lgr_if_needed(carry, product);5207z_bru(L_first_loop);520852095210bind(L_one_y); // Load one 32 bit portion of y as (0,value).52115212clear_reg(y_idx);5213mem2reg_opt(y_idx, Address(y, (intptr_t) 0), false);5214z_bru(L_multiply);521552165217bind(L_one_x); // Load one 32 bit portion of x as (0,value).52185219clear_reg(x_xstart);5220mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);5221z_bru(L_first_loop);52225223bind(L_first_loop_exit);5224}52255226// Multiply 64 bit by 64 bit and add 128 bit.5227void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y,5228Register z,5229Register yz_idx, Register idx,5230Register carry, Register product,5231int offset) {5232// huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;5233// z[kdx] = (jlong)product;52345235Register multiplicand = product->successor();5236Register product_low = multiplicand;52375238z_sllg(Z_R7, idx, LogBytesPerInt);5239mem2reg_opt(yz_idx, Address(y, Z_R7, offset));52405241lgr_if_needed(multiplicand, x_xstart);5242z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand5243mem2reg_opt(yz_idx, Address(z, Z_R7, offset));52445245add2_with_carry(product, product_low, carry, yz_idx);52465247z_sllg(Z_R7, idx, LogBytesPerInt);5248reg2mem_opt(product_low, Address(z, Z_R7, offset));52495250}52515252// Multiply 128 bit by 128 bit. Unrolled inner loop.5253void MacroAssembler::multiply_128_x_128_loop(Register x_xstart,5254Register y, Register z,5255Register yz_idx, Register idx,5256Register jdx,5257Register carry, Register product,5258Register carry2) {5259// jlong carry, x[], y[], z[];5260// int kdx = ystart+1;5261// for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop5262// huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;5263// z[kdx+idx+1] = (jlong)product;5264// jlong carry2 = (jlong)(product >>> 64);5265// product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;5266// z[kdx+idx] = (jlong)product;5267// carry = (jlong)(product >>> 64);5268// }5269// idx += 2;5270// if (idx > 0) {5271// product = (y[idx] * x_xstart) + z[kdx+idx] + carry;5272// z[kdx+idx] = (jlong)product;5273// carry = (jlong)(product >>> 64);5274// }52755276Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;52775278// scale the index5279lgr_if_needed(jdx, idx);5280and_imm(jdx, 0xfffffffffffffffcL);5281rshift(jdx, 2);528252835284bind(L_third_loop);52855286z_aghi(jdx, -1);5287z_brl(L_third_loop_exit);5288add2reg(idx, -4);52895290multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);5291lgr_if_needed(carry2, product);52925293multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);5294lgr_if_needed(carry, product);5295z_bru(L_third_loop);529652975298bind(L_third_loop_exit); // Handle any left-over operand parts.52995300and_imm(idx, 0x3);5301z_brz(L_post_third_loop_done);53025303Label L_check_1;53045305z_aghi(idx, -2);5306z_brl(L_check_1);53075308multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);5309lgr_if_needed(carry, product);531053115312bind(L_check_1);53135314add2reg(idx, 0x2);5315and_imm(idx, 0x1);5316z_aghi(idx, -1);5317z_brl(L_post_third_loop_done);53185319Register multiplicand = product->successor();5320Register product_low = multiplicand;53215322z_sllg(Z_R7, idx, LogBytesPerInt);5323clear_reg(yz_idx);5324mem2reg_opt(yz_idx, Address(y, Z_R7, 0), false);5325lgr_if_needed(multiplicand, x_xstart);5326z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand5327clear_reg(yz_idx);5328mem2reg_opt(yz_idx, Address(z, Z_R7, 0), false);53295330add2_with_carry(product, product_low, yz_idx, carry);53315332z_sllg(Z_R7, idx, LogBytesPerInt);5333reg2mem_opt(product_low, Address(z, Z_R7, 0), false);5334rshift(product_low, 32);53355336lshift(product, 32);5337z_ogr(product_low, product);5338lgr_if_needed(carry, product_low);53395340bind(L_post_third_loop_done);5341}53425343void MacroAssembler::multiply_to_len(Register x, Register xlen,5344Register y, Register ylen,5345Register z,5346Register tmp1, Register tmp2,5347Register tmp3, Register tmp4,5348Register tmp5) {5349ShortBranchVerifier sbv(this);53505351assert_different_registers(x, xlen, y, ylen, z,5352tmp1, tmp2, tmp3, tmp4, tmp5, Z_R1_scratch, Z_R7);5353assert_different_registers(x, xlen, y, ylen, z,5354tmp1, tmp2, tmp3, tmp4, tmp5, Z_R8);53555356z_stmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);53575358// In openJdk, we store the argument as 32-bit value to slot.5359Address zlen(Z_SP, _z_abi(remaining_cargs)); // Int in long on big endian.53605361const Register idx = tmp1;5362const Register kdx = tmp2;5363const Register xstart = tmp3;53645365const Register y_idx = tmp4;5366const Register carry = tmp5;5367const Register product = Z_R0_scratch;5368const Register x_xstart = Z_R8;53695370// First Loop.5371//5372// final static long LONG_MASK = 0xffffffffL;5373// int xstart = xlen - 1;5374// int ystart = ylen - 1;5375// long carry = 0;5376// for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) {5377// long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;5378// z[kdx] = (int)product;5379// carry = product >>> 32;5380// }5381// z[xstart] = (int)carry;5382//53835384lgr_if_needed(idx, ylen); // idx = ylen5385z_llgf(kdx, zlen); // C2 does not respect int to long conversion for stub calls, thus load zero-extended.5386clear_reg(carry); // carry = 053875388Label L_done;53895390lgr_if_needed(xstart, xlen);5391z_aghi(xstart, -1);5392z_brl(L_done);53935394multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);53955396NearLabel L_second_loop;5397compare64_and_branch(kdx, RegisterOrConstant((intptr_t) 0), bcondEqual, L_second_loop);53985399NearLabel L_carry;5400z_aghi(kdx, -1);5401z_brz(L_carry);54025403// Store lower 32 bits of carry.5404z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);5405reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);5406rshift(carry, 32);5407z_aghi(kdx, -1);540854095410bind(L_carry);54115412// Store upper 32 bits of carry.5413z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);5414reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);54155416// Second and third (nested) loops.5417//5418// for (int i = xstart-1; i >= 0; i--) { // Second loop5419// carry = 0;5420// for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop5421// long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +5422// (z[k] & LONG_MASK) + carry;5423// z[k] = (int)product;5424// carry = product >>> 32;5425// }5426// z[i] = (int)carry;5427// }5428//5429// i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx54305431const Register jdx = tmp1;54325433bind(L_second_loop);54345435clear_reg(carry); // carry = 0;5436lgr_if_needed(jdx, ylen); // j = ystart+154375438z_aghi(xstart, -1); // i = xstart-1;5439z_brl(L_done);54405441// Use free slots in the current stackframe instead of push/pop.5442Address zsave(Z_SP, _z_abi(carg_1));5443reg2mem_opt(z, zsave);544454455446Label L_last_x;54475448z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);5449load_address(z, Address(z, Z_R1_scratch, 4)); // z = z + k - j5450z_aghi(xstart, -1); // i = xstart-1;5451z_brl(L_last_x);54525453z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);5454mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));545554565457Label L_third_loop_prologue;54585459bind(L_third_loop_prologue);54605461Address xsave(Z_SP, _z_abi(carg_2));5462Address xlensave(Z_SP, _z_abi(carg_3));5463Address ylensave(Z_SP, _z_abi(carg_4));54645465reg2mem_opt(x, xsave);5466reg2mem_opt(xstart, xlensave);5467reg2mem_opt(ylen, ylensave);546854695470multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);54715472mem2reg_opt(z, zsave);5473mem2reg_opt(x, xsave);5474mem2reg_opt(xlen, xlensave); // This is the decrement of the loop counter!5475mem2reg_opt(ylen, ylensave);54765477add2reg(tmp3, 1, xlen);5478z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);5479reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);5480z_aghi(tmp3, -1);5481z_brl(L_done);54825483rshift(carry, 32);5484z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);5485reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);5486z_bru(L_second_loop);54875488// Next infrequent code is moved outside loops.5489bind(L_last_x);54905491clear_reg(x_xstart);5492mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);5493z_bru(L_third_loop_prologue);54945495bind(L_done);54965497z_lmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);5498}54995500#ifndef PRODUCT5501// Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).5502void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {5503Label ok;5504if (check_equal) {5505z_bre(ok);5506} else {5507z_brne(ok);5508}5509stop(msg, id);5510bind(ok);5511}55125513// Assert if CC indicates "low".5514void MacroAssembler::asm_assert_low(const char *msg, int id) {5515Label ok;5516z_brnl(ok);5517stop(msg, id);5518bind(ok);5519}55205521// Assert if CC indicates "high".5522void MacroAssembler::asm_assert_high(const char *msg, int id) {5523Label ok;5524z_brnh(ok);5525stop(msg, id);5526bind(ok);5527}55285529// Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false)5530// generate non-relocatable code.5531void MacroAssembler::asm_assert_static(bool check_equal, const char *msg, int id) {5532Label ok;5533if (check_equal) { z_bre(ok); }5534else { z_brne(ok); }5535stop_static(msg, id);5536bind(ok);5537}55385539void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,5540Register mem_base, const char* msg, int id) {5541switch (size) {5542case 4:5543load_and_test_int(Z_R0, Address(mem_base, mem_offset));5544break;5545case 8:5546load_and_test_long(Z_R0, Address(mem_base, mem_offset));5547break;5548default:5549ShouldNotReachHere();5550}5551if (allow_relocation) { asm_assert(check_equal, msg, id); }5552else { asm_assert_static(check_equal, msg, id); }5553}55545555// Check the condition5556// expected_size == FP - SP5557// after transformation:5558// expected_size - FP + SP == 05559// Destroys Register expected_size if no tmp register is passed.5560void MacroAssembler::asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) {5561if (tmp == noreg) {5562tmp = expected_size;5563} else {5564if (tmp != expected_size) {5565z_lgr(tmp, expected_size);5566}5567z_algr(tmp, Z_SP);5568z_slg(tmp, 0, Z_R0, Z_SP);5569asm_assert_eq(msg, id);5570}5571}5572#endif // !PRODUCT55735574void MacroAssembler::verify_thread() {5575if (VerifyThread) {5576unimplemented("", 117);5577}5578}55795580// Save and restore functions: Exclude Z_R0.5581void MacroAssembler::save_volatile_regs(Register dst, int offset, bool include_fp, bool include_flags) {5582z_stmg(Z_R1, Z_R5, offset, dst); offset += 5 * BytesPerWord;5583if (include_fp) {5584z_std(Z_F0, Address(dst, offset)); offset += BytesPerWord;5585z_std(Z_F1, Address(dst, offset)); offset += BytesPerWord;5586z_std(Z_F2, Address(dst, offset)); offset += BytesPerWord;5587z_std(Z_F3, Address(dst, offset)); offset += BytesPerWord;5588z_std(Z_F4, Address(dst, offset)); offset += BytesPerWord;5589z_std(Z_F5, Address(dst, offset)); offset += BytesPerWord;5590z_std(Z_F6, Address(dst, offset)); offset += BytesPerWord;5591z_std(Z_F7, Address(dst, offset)); offset += BytesPerWord;5592}5593if (include_flags) {5594Label done;5595z_mvi(Address(dst, offset), 2); // encoding: equal5596z_bre(done);5597z_mvi(Address(dst, offset), 4); // encoding: higher5598z_brh(done);5599z_mvi(Address(dst, offset), 1); // encoding: lower5600bind(done);5601}5602}5603void MacroAssembler::restore_volatile_regs(Register src, int offset, bool include_fp, bool include_flags) {5604z_lmg(Z_R1, Z_R5, offset, src); offset += 5 * BytesPerWord;5605if (include_fp) {5606z_ld(Z_F0, Address(src, offset)); offset += BytesPerWord;5607z_ld(Z_F1, Address(src, offset)); offset += BytesPerWord;5608z_ld(Z_F2, Address(src, offset)); offset += BytesPerWord;5609z_ld(Z_F3, Address(src, offset)); offset += BytesPerWord;5610z_ld(Z_F4, Address(src, offset)); offset += BytesPerWord;5611z_ld(Z_F5, Address(src, offset)); offset += BytesPerWord;5612z_ld(Z_F6, Address(src, offset)); offset += BytesPerWord;5613z_ld(Z_F7, Address(src, offset)); offset += BytesPerWord;5614}5615if (include_flags) {5616z_cli(Address(src, offset), 2); // see encoding above5617}5618}56195620// Plausibility check for oops.5621void MacroAssembler::verify_oop(Register oop, const char* msg) {5622if (!VerifyOops) return;56235624BLOCK_COMMENT("verify_oop {");5625unsigned int nbytes_save = (5 + 8 + 1) * BytesPerWord;5626address entry_addr = StubRoutines::verify_oop_subroutine_entry_address();56275628save_return_pc();56295630// Push frame, but preserve flags5631z_lgr(Z_R0, Z_SP);5632z_lay(Z_SP, -((int64_t)nbytes_save + frame::z_abi_160_size), Z_SP);5633z_stg(Z_R0, _z_abi(callers_sp), Z_SP);56345635save_volatile_regs(Z_SP, frame::z_abi_160_size, true, true);56365637lgr_if_needed(Z_ARG2, oop);5638load_const_optimized(Z_ARG1, (address)msg);5639load_const_optimized(Z_R1, entry_addr);5640z_lg(Z_R1, 0, Z_R1);5641call_c(Z_R1);56425643restore_volatile_regs(Z_SP, frame::z_abi_160_size, true, true);5644pop_frame();5645restore_return_pc();56465647BLOCK_COMMENT("} verify_oop ");5648}56495650void MacroAssembler::verify_oop_addr(Address addr, const char* msg) {5651if (!VerifyOops) return;56525653BLOCK_COMMENT("verify_oop {");5654unsigned int nbytes_save = (5 + 8) * BytesPerWord;5655address entry_addr = StubRoutines::verify_oop_subroutine_entry_address();56565657save_return_pc();5658unsigned int frame_size = push_frame_abi160(nbytes_save); // kills Z_R05659save_volatile_regs(Z_SP, frame::z_abi_160_size, true, false);56605661z_lg(Z_ARG2, addr.plus_disp(frame_size));5662load_const_optimized(Z_ARG1, (address)msg);5663load_const_optimized(Z_R1, entry_addr);5664z_lg(Z_R1, 0, Z_R1);5665call_c(Z_R1);56665667restore_volatile_regs(Z_SP, frame::z_abi_160_size, true, false);5668pop_frame();5669restore_return_pc();56705671BLOCK_COMMENT("} verify_oop ");5672}56735674const char* MacroAssembler::stop_types[] = {5675"stop",5676"untested",5677"unimplemented",5678"shouldnotreachhere"5679};56805681static void stop_on_request(const char* tp, const char* msg) {5682tty->print("Z assembly code requires stop: (%s) %s\n", tp, msg);5683guarantee(false, "Z assembly code requires stop: %s", msg);5684}56855686void MacroAssembler::stop(int type, const char* msg, int id) {5687BLOCK_COMMENT(err_msg("stop: %s {", msg));56885689// Setup arguments.5690load_const(Z_ARG1, (void*) stop_types[type%stop_end]);5691load_const(Z_ARG2, (void*) msg);5692get_PC(Z_R14); // Following code pushes a frame without entering a new function. Use current pc as return address.5693save_return_pc(); // Saves return pc Z_R14.5694push_frame_abi160(0);5695call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);5696// The plain disassembler does not recognize illtrap. It instead displays5697// a 32-bit value. Issueing two illtraps assures the disassembler finds5698// the proper beginning of the next instruction.5699z_illtrap(); // Illegal instruction.5700z_illtrap(); // Illegal instruction.57015702BLOCK_COMMENT(" } stop");5703}57045705// Special version of stop() for code size reduction.5706// Reuses the previously generated call sequence, if any.5707// Generates the call sequence on its own, if necessary.5708// Note: This code will work only in non-relocatable code!5709// The relative address of the data elements (arg1, arg2) must not change.5710// The reentry point must not move relative to it's users. This prerequisite5711// should be given for "hand-written" code, if all chain calls are in the same code blob.5712// Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe.5713address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) {5714BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==NULL?"init":"cont", allow_relocation?"reloc ":"static", msg));57155716// Setup arguments.5717if (allow_relocation) {5718// Relocatable version (for comparison purposes). Remove after some time.5719load_const(Z_ARG1, (void*) stop_types[type%stop_end]);5720load_const(Z_ARG2, (void*) msg);5721} else {5722load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]);5723load_absolute_address(Z_ARG2, (address)msg);5724}5725if ((reentry != NULL) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) {5726BLOCK_COMMENT("branch to reentry point:");5727z_brc(bcondAlways, reentry);5728} else {5729BLOCK_COMMENT("reentry point:");5730reentry = pc(); // Re-entry point for subsequent stop calls.5731save_return_pc(); // Saves return pc Z_R14.5732push_frame_abi160(0);5733if (allow_relocation) {5734reentry = NULL; // Prevent reentry if code relocation is allowed.5735call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);5736} else {5737call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);5738}5739z_illtrap(); // Illegal instruction as emergency stop, should the above call return.5740}5741BLOCK_COMMENT(" } stop_chain");57425743return reentry;5744}57455746// Special version of stop() for code size reduction.5747// Assumes constant relative addresses for data and runtime call.5748void MacroAssembler::stop_static(int type, const char* msg, int id) {5749stop_chain(NULL, type, msg, id, false);5750}57515752void MacroAssembler::stop_subroutine() {5753unimplemented("stop_subroutine", 710);5754}57555756// Prints msg to stdout from within generated code..5757void MacroAssembler::warn(const char* msg) {5758RegisterSaver::save_live_registers(this, RegisterSaver::all_registers, Z_R14);5759load_absolute_address(Z_R1, (address) warning);5760load_absolute_address(Z_ARG1, (address) msg);5761(void) call(Z_R1);5762RegisterSaver::restore_live_registers(this, RegisterSaver::all_registers);5763}57645765#ifndef PRODUCT57665767// Write pattern 0x0101010101010101 in region [low-before, high+after].5768void MacroAssembler::zap_from_to(Register low, Register high, Register val, Register addr, int before, int after) {5769if (!ZapEmptyStackFields) return;5770BLOCK_COMMENT("zap memory region {");5771load_const_optimized(val, 0x0101010101010101);5772int size = before + after;5773if (low == high && size < 5 && size > 0) {5774int offset = -before*BytesPerWord;5775for (int i = 0; i < size; ++i) {5776z_stg(val, Address(low, offset));5777offset +=(1*BytesPerWord);5778}5779} else {5780add2reg(addr, -before*BytesPerWord, low);5781if (after) {5782#ifdef ASSERT5783jlong check = after * BytesPerWord;5784assert(Immediate::is_simm32(check) && Immediate::is_simm32(-check), "value not encodable !");5785#endif5786add2reg(high, after * BytesPerWord);5787}5788NearLabel loop;5789bind(loop);5790z_stg(val, Address(addr));5791add2reg(addr, 8);5792compare64_and_branch(addr, high, bcondNotHigh, loop);5793if (after) {5794add2reg(high, -after * BytesPerWord);5795}5796}5797BLOCK_COMMENT("} zap memory region");5798}5799#endif // !PRODUCT58005801SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value, Register _rscratch) {5802_masm = masm;5803_masm->load_absolute_address(_rscratch, (address)flag_addr);5804_masm->load_and_test_int(_rscratch, Address(_rscratch));5805if (value) {5806_masm->z_brne(_label); // Skip if true, i.e. != 0.5807} else {5808_masm->z_bre(_label); // Skip if false, i.e. == 0.5809}5810}58115812SkipIfEqual::~SkipIfEqual() {5813_masm->bind(_label);5814}581558165817