Path: blob/master/src/hotspot/cpu/ppc/assembler_ppc.cpp
/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif

int AbstractAssembler::code_fill_byte() {
  return 0x00; // illegal instruction 0x00000000
}

// Patch instruction `inst' at offset `inst_pos' to refer to
// `dest_pos' and return the resulting instruction.  We should have
// pcs, not offsets, but since all is relative, it will work out fine.
int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {
  int m = 0; // mask for displacement field
  int v = 0; // new value for displacement field

  switch (inv_op_ppc(inst)) {
  case b_op:  m = li(-1); v = li(disp(dest_pos, inst_pos)); break;
  case bc_op: m = bd(-1); v = bd(disp(dest_pos, inst_pos)); break;
  default: ShouldNotReachHere();
  }
  return inst & ~m | v;
}

// Return the offset, relative to _code_begin, of the destination of
// the branch inst at offset pos.
int Assembler::branch_destination(int inst, int pos) {
  int r = 0;
  switch (inv_op_ppc(inst)) {
  case b_op:  r = bxx_destination_offset(inst, pos); break;
  case bc_op: r = inv_bd_field(inst, pos); break;
  default: ShouldNotReachHere();
  }
  return r;
}
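
// For illustration (not from the original source): a b_op instruction emitted
// at offset 0x100 that must be retargeted to offset 0x80 is rewritten by
// patched_branch(0x80, inst, 0x100) as (inst & ~li(-1)) | li(disp(0x80, 0x100)),
// i.e. the non-displacement bits are kept and the LI displacement field is
// replaced with the encoding of -0x80. branch_destination() is the inverse
// mapping: applied to the patched instruction at offset 0x100 it yields 0x80.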

// Low-level andi-one-instruction-macro.
void Assembler::andi(Register a, Register s, const long ui16) {
  if (is_power_of_2(((jlong) ui16)+1)) {
    // pow2minus1
    clrldi(a, s, 64 - log2i_exact((((jlong) ui16)+1)));
  } else if (is_power_of_2((jlong) ui16)) {
    // pow2
    rlwinm(a, s, 0, 31 - log2i_exact((jlong) ui16), 31 - log2i_exact((jlong) ui16));
  } else if (is_power_of_2((jlong)-ui16)) {
    // negpow2
    clrrdi(a, s, log2i_exact((jlong)-ui16));
  } else {
    assert(is_uimm(ui16, 16), "must be 16-bit unsigned immediate");
    andi_(a, s, ui16);
  }
}
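
// Worked examples of the three fast paths above:
//   andi(a, s, 0xFF) expands to clrldi(a, s, 56)         (0xFF + 1 == 2^8: keep the low 8 bits)
//   andi(a, s, 0x10) expands to rlwinm(a, s, 0, 27, 27)  (0x10 == 2^4: isolate bit 4)
//   andi(a, s, -16)  expands to clrrdi(a, s, 4)          (16 == 2^4: clear the low 4 bits)
// Unlike the fallback andi_, which is a record form and always sets CR0,
// these rotate-based encodings leave the condition register untouched.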

// RegisterOrConstant version.
void Assembler::ld(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
      Assembler::ld(d, simm16_rest, d);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::ld(d, roc.as_constant(), s1);
    } else {
      load_const_optimized(d, roc.as_constant());
      Assembler::ldx(d, d, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::ld(d, 0, roc.as_register());
    else
      Assembler::ldx(d, roc.as_register(), s1);
  }
}
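
// All RegisterOrConstant accessors below follow the same three-way split as
// ld above: a constant used as an absolute address (s1 == noreg) is
// materialized via load_const_optimized with return_simm16_rest=true so the
// low 16 bits can be folded into the displacement; a constant offset that
// fits simm16 is encoded directly; anything larger is loaded into a register
// and combined with s1 through the indexed (x-form) instruction. For
// illustration, ld(R3, RegisterOrConstant((intptr_t)0x12345678), noreg)
// emits lis(R3, 0x1234) followed by ld(R3, 0x5678, R3).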

void Assembler::lwa(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
      Assembler::lwa(d, simm16_rest, d);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::lwa(d, roc.as_constant(), s1);
    } else {
      load_const_optimized(d, roc.as_constant());
      Assembler::lwax(d, d, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::lwa(d, 0, roc.as_register());
    else
      Assembler::lwax(d, roc.as_register(), s1);
  }
}

void Assembler::lwz(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
      Assembler::lwz(d, simm16_rest, d);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::lwz(d, roc.as_constant(), s1);
    } else {
      load_const_optimized(d, roc.as_constant());
      Assembler::lwzx(d, d, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::lwz(d, 0, roc.as_register());
    else
      Assembler::lwzx(d, roc.as_register(), s1);
  }
}

void Assembler::lha(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
      Assembler::lha(d, simm16_rest, d);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::lha(d, roc.as_constant(), s1);
    } else {
      load_const_optimized(d, roc.as_constant());
      Assembler::lhax(d, d, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::lha(d, 0, roc.as_register());
    else
      Assembler::lhax(d, roc.as_register(), s1);
  }
}

void Assembler::lhz(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
      Assembler::lhz(d, simm16_rest, d);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::lhz(d, roc.as_constant(), s1);
    } else {
      load_const_optimized(d, roc.as_constant());
      Assembler::lhzx(d, d, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::lhz(d, 0, roc.as_register());
    else
      Assembler::lhzx(d, roc.as_register(), s1);
  }
}

void Assembler::lbz(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
      Assembler::lbz(d, simm16_rest, d);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::lbz(d, roc.as_constant(), s1);
    } else {
      load_const_optimized(d, roc.as_constant());
      Assembler::lbzx(d, d, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::lbz(d, 0, roc.as_register());
    else
      Assembler::lbzx(d, roc.as_register(), s1);
  }
}

void Assembler::std(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
      int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
      Assembler::std(d, simm16_rest, tmp);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::std(d, roc.as_constant(), s1);
    } else {
      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
      load_const_optimized(tmp, roc.as_constant());
      Assembler::stdx(d, tmp, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::std(d, 0, roc.as_register());
    else
      Assembler::stdx(d, roc.as_register(), s1);
  }
}

void Assembler::stw(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
      int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
      Assembler::stw(d, simm16_rest, tmp);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::stw(d, roc.as_constant(), s1);
    } else {
      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
      load_const_optimized(tmp, roc.as_constant());
      Assembler::stwx(d, tmp, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::stw(d, 0, roc.as_register());
    else
      Assembler::stwx(d, roc.as_register(), s1);
  }
}

void Assembler::sth(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
      int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
      Assembler::sth(d, simm16_rest, tmp);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::sth(d, roc.as_constant(), s1);
    } else {
      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
      load_const_optimized(tmp, roc.as_constant());
      Assembler::sthx(d, tmp, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::sth(d, 0, roc.as_register());
    else
      Assembler::sthx(d, roc.as_register(), s1);
  }
}

void Assembler::stb(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
  if (roc.is_constant()) {
    if (s1 == noreg) {
      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
      int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
      Assembler::stb(d, simm16_rest, tmp);
    } else if (is_simm(roc.as_constant(), 16)) {
      Assembler::stb(d, roc.as_constant(), s1);
    } else {
      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
      load_const_optimized(tmp, roc.as_constant());
      Assembler::stbx(d, tmp, s1);
    }
  } else {
    if (s1 == noreg)
      Assembler::stb(d, 0, roc.as_register());
    else
      Assembler::stbx(d, roc.as_register(), s1);
  }
}

void Assembler::add(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    intptr_t c = roc.as_constant();
    assert(is_simm(c, 16), "too big");
    addi(d, s1, (int)c);
  }
  else add(d, roc.as_register(), s1);
}

void Assembler::subf(Register d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    intptr_t c = roc.as_constant();
    assert(is_simm(-c, 16), "too big");
    addi(d, s1, (int)-c);
  }
  else subf(d, roc.as_register(), s1);
}

void Assembler::cmpd(ConditionRegister d, RegisterOrConstant roc, Register s1) {
  if (roc.is_constant()) {
    intptr_t c = roc.as_constant();
    assert(is_simm(c, 16), "too big");
    cmpdi(d, s1, (int)c);
  }
  else cmpd(d, roc.as_register(), s1);
}

// Load a 64 bit constant. Patchable.
void Assembler::load_const(Register d, long x, Register tmp) {
  // 64-bit value: x = xa xb xc xd
  int xa = (x >> 48) & 0xffff;
  int xb = (x >> 32) & 0xffff;
  int xc = (x >> 16) & 0xffff;
  int xd = (x >>  0) & 0xffff;
  if (tmp == noreg) {
    Assembler::lis( d, (int)(short)xa);
    Assembler::ori( d, d, (unsigned int)xb);
    Assembler::sldi(d, d, 32);
    Assembler::oris(d, d, (unsigned int)xc);
    Assembler::ori( d, d, (unsigned int)xd);
  } else {
    // exploit instruction level parallelism if we have a tmp register
    assert_different_registers(d, tmp);
    Assembler::lis(tmp, (int)(short)xa);
    Assembler::lis(d, (int)(short)xc);
    Assembler::ori(tmp, tmp, (unsigned int)xb);
    Assembler::ori(d, d, (unsigned int)xd);
    Assembler::insrdi(d, tmp, 32, 0);
  }
}
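
// Illustrative expansion for x = 0x1122334455667788:
//   tmp == noreg: lis(d, 0x1122); ori(d, d, 0x3344); sldi(d, d, 32);
//                 oris(d, d, 0x5566); ori(d, d, 0x7788);
//   with tmp:     lis(tmp, 0x1122); lis(d, 0x5566); ori(tmp, tmp, 0x3344);
//                 ori(d, d, 0x7788); insrdi(d, tmp, 32, 0);
// Both variants are a fixed five instructions, which keeps the sequence
// patchable, but the tmp variant shortens the dependency chain from five
// instructions to three.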

// Load a 64 bit constant, optimized, not identifiable.
// Tmp can be used to increase ILP. Set return_simm16_rest=true to get a
// 16 bit immediate offset.
int Assembler::load_const_optimized(Register d, long x, Register tmp, bool return_simm16_rest) {
  // Avoid accidentally trying to use R0 for indexed addressing.
  assert_different_registers(d, tmp);

  short xa, xb, xc, xd; // Four 16-bit chunks of const.
  long rem = x;         // Remaining part of const.

  xd = rem & 0xFFFF;    // Lowest 16-bit chunk.
  rem = (rem >> 16) + ((unsigned short)xd >> 15); // Compensation for sign extend.

  if (rem == 0) { // opt 1: simm16
    li(d, xd);
    return 0;
  }

  int retval = 0;
  if (return_simm16_rest) {
    retval = xd;
    x = rem << 16;
    xd = 0;
  }

  if (d == R0) { // Can't use addi.
    if (is_simm(x, 32)) { // opt 2: simm32
      lis(d, x >> 16);
      if (xd) ori(d, d, (unsigned short)xd);
    } else {
      // 64-bit value: x = xa xb xc xd
      xa = (x >> 48) & 0xffff;
      xb = (x >> 32) & 0xffff;
      xc = (x >> 16) & 0xffff;
      bool xa_loaded = (xb & 0x8000) ? (xa != -1) : (xa != 0);
      if (tmp == noreg || (xc == 0 && xd == 0)) {
        if (xa_loaded) {
          lis(d, xa);
          if (xb) { ori(d, d, (unsigned short)xb); }
        } else {
          li(d, xb);
        }
        sldi(d, d, 32);
        if (xc) { oris(d, d, (unsigned short)xc); }
        if (xd) { ori( d, d, (unsigned short)xd); }
      } else {
        // Exploit instruction level parallelism if we have a tmp register.
        bool xc_loaded = (xd & 0x8000) ? (xc != -1) : (xc != 0);
        if (xa_loaded) {
          lis(tmp, xa);
        }
        if (xc_loaded) {
          lis(d, xc);
        }
        if (xa_loaded) {
          if (xb) { ori(tmp, tmp, (unsigned short)xb); }
        } else {
          li(tmp, xb);
        }
        if (xc_loaded) {
          if (xd) { ori(d, d, (unsigned short)xd); }
        } else {
          li(d, xd);
        }
        insrdi(d, tmp, 32, 0);
      }
    }
    return retval;
  }

  xc = rem & 0xFFFF; // Next 16-bit chunk.
  rem = (rem >> 16) + ((unsigned short)xc >> 15); // Compensation for sign extend.

  if (rem == 0) { // opt 2: simm32
    lis(d, xc);
  } else { // High 32 bits needed.

    if (tmp != noreg && (int)x != 0) { // opt 3: We have a temp reg.
      // No carry propagation between xc and higher chunks here (use logical instructions).
      xa = (x >> 48) & 0xffff;
      xb = (x >> 32) & 0xffff; // No sign compensation, we use lis+ori or li to allow usage of R0.
      bool xa_loaded = (xb & 0x8000) ? (xa != -1) : (xa != 0);
      bool return_xd = false;

      if (xa_loaded) { lis(tmp, xa); }
      if (xc) { lis(d, xc); }
      if (xa_loaded) {
        if (xb) { ori(tmp, tmp, (unsigned short)xb); } // No addi, we support tmp == R0.
      } else {
        li(tmp, xb);
      }
      if (xc) {
        if (xd) { addi(d, d, xd); }
      } else {
        li(d, xd);
      }
      insrdi(d, tmp, 32, 0);
      return retval;
    }

    xb = rem & 0xFFFF; // Next 16-bit chunk.
    rem = (rem >> 16) + ((unsigned short)xb >> 15); // Compensation for sign extend.

    xa = rem & 0xFFFF; // Highest 16-bit chunk.

    // opt 4: avoid adding 0
    if (xa) { // Highest 16-bit needed?
      lis(d, xa);
      if (xb) { addi(d, d, xb); }
    } else {
      li(d, xb);
    }
    sldi(d, d, 32);
    if (xc) { addis(d, d, xc); }
  }

  if (xd) { addi(d, d, xd); }
  return retval;
}
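
// Illustrative traces (d != R0, tmp == noreg):
//   x = 0x7777     -> li(d, 0x7777)                       (opt 1: simm16)
//   x = 0x12345678 -> lis(d, 0x1234); addi(d, d, 0x5678)  (opt 2 plus low chunk)
//   x = 0x1234FFFF -> lis(d, 0x1235); addi(d, d, -1)
// The last case shows the sign-extend compensation: (unsigned short)xd >> 15
// adds 1 to the next chunk whenever the lower chunk is negative as a simm16,
// cancelling the sign extension of the later addi. With
// return_simm16_rest=true the lowest chunk is returned instead of added, so
// callers can fold it into a memory instruction's displacement.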

// We emit only one addition to s to optimize latency.
int Assembler::add_const_optimized(Register d, Register s, long x, Register tmp, bool return_simm16_rest) {
  assert(s != R0 && s != tmp, "unsupported");
  long rem = x;

  // Case 1: Can use mr or addi.
  short xd = rem & 0xFFFF; // Lowest 16-bit chunk.
  rem = (rem >> 16) + ((unsigned short)xd >> 15);
  if (rem == 0) {
    if (xd == 0) {
      if (d != s) { mr(d, s); }
      return 0;
    }
    if (return_simm16_rest && (d == s)) {
      return xd;
    }
    addi(d, s, xd);
    return 0;
  }

  // Case 2: Can use addis.
  if (xd == 0) {
    short xc = rem & 0xFFFF; // 2nd 16-bit chunk.
    rem = (rem >> 16) + ((unsigned short)xc >> 15);
    if (rem == 0) {
      addis(d, s, xc);
      return 0;
    }
  }

  // Other cases: load & add.
  Register tmp1 = tmp,
           tmp2 = noreg;
  if ((d != tmp) && (d != s)) {
    // Can use d.
    tmp1 = d;
    tmp2 = tmp;
  }
  int simm16_rest = load_const_optimized(tmp1, x, tmp2, return_simm16_rest);
  add(d, tmp1, s);
  return simm16_rest;
}
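
// Illustrative cases (d, s, tmp pairwise different):
//   x = 0          -> mr(d, s), or nothing if d == s      (case 1)
//   x = 0x4000     -> addi(d, s, 0x4000)                  (case 1)
//   x = 0x12340000 -> addis(d, s, 0x1234)                 (case 2)
//   x = 0x12345678 -> load_const_optimized(d, x, tmp); add(d, d, s)
// In the last case the constant is built independently of s, so only the
// final add sits on the latency path from s to d.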

#ifndef PRODUCT
// Test of ppc assembler.
void Assembler::test_asm() {
  // PPC 1, section 3.3.8, Fixed-Point Arithmetic Instructions
  addi(   R0,  R1,  10);
  addis(  R5,  R2,  11);
  addic_( R3,  R31, 42);
  subfic( R21, R12, 2112);
  add(    R3,  R2,  R1);
  add_(   R11, R22, R30);
  subf(   R7,  R6,  R5);
  subf_(  R8,  R9,  R4);
  addc(   R11, R12, R13);
  addc_(  R14, R14, R14);
  subfc(  R15, R16, R17);
  subfc_( R18, R20, R19);
  adde(   R20, R22, R24);
  adde_(  R29, R27, R26);
  subfe(  R28, R1,  R0);
  subfe_( R21, R11, R29);
  neg(    R21, R22);
  neg_(   R13, R23);
  mulli(  R0,  R11, -31);
  mulld(  R1,  R18, R21);
  mulld_( R2,  R17, R22);
  mullw(  R3,  R16, R23);
  mullw_( R4,  R15, R24);
  divd(   R5,  R14, R25);
  divd_(  R6,  R13, R26);
  divw(   R7,  R12, R27);
  divw_(  R8,  R11, R28);

  li(     R3, -4711);

  // PPC 1, section 3.3.9, Fixed-Point Compare Instructions
  cmpi(   CCR7, 0, R27, 4711);
  cmp(    CCR0, 1, R14, R11);
  cmpli(  CCR5, 1, R17, 45);
  cmpl(   CCR3, 0, R9,  R10);

  cmpwi(  CCR7, R27, 4711);
  cmpw(   CCR0, R14, R11);
  cmplwi( CCR5, R17, 45);
  cmplw(  CCR3, R9,  R10);

  cmpdi(  CCR7, R27, 4711);
  cmpd(   CCR0, R14, R11);
  cmpldi( CCR5, R17, 45);
  cmpld(  CCR3, R9,  R10);

  // PPC 1, section 3.3.11, Fixed-Point Logical Instructions
  andi_(  R4,  R5,  0xff);
  andis_( R12, R13, 0x7b51);
  ori(    R1,  R4,  13);
  oris(   R3,  R5,  177);
  xori(   R7,  R6,  51);
  xoris(  R29, R0,  1);
  andr(   R17, R21, R16);
  and_(   R3,  R5,  R15);
  orr(    R2,  R1,  R9);
  or_(    R17, R15, R11);
  xorr(   R19, R18, R10);
  xor_(   R31, R21, R11);
  nand(   R5,  R7,  R3);
  nand_(  R3,  R1,  R0);
  nor(    R2,  R3,  R5);
  nor_(   R3,  R6,  R8);
  andc(   R25, R12, R11);
  andc_(  R24, R22, R21);
  orc(    R20, R10, R12);
  orc_(   R22, R2,  R13);

  nop();

  // PPC 1, section 3.3.12, Fixed-Point Rotate and Shift Instructions
  sld(    R5,  R6,  R8);
  sld_(   R3,  R5,  R9);
  slw(    R2,  R1,  R10);
  slw_(   R6,  R26, R16);
  srd(    R16, R24, R8);
  srd_(   R21, R14, R7);
  srw(    R22, R25, R29);
  srw_(   R5,  R18, R17);
  srad(   R7,  R11, R0);
  srad_(  R9,  R13, R1);
  sraw(   R7,  R15, R2);
  sraw_(  R4,  R17, R3);
  sldi(   R3,  R18, 63);
  sldi_(  R2,  R20, 30);
  slwi(   R1,  R21, 30);
  slwi_(  R7,  R23, 8);
  srdi(   R0,  R19, 2);
  srdi_(  R12, R24, 5);
  srwi(   R13, R27, 6);
  srwi_(  R14, R29, 7);
  sradi(  R15, R30, 9);
  sradi_( R16, R31, 19);
  srawi(  R17, R31, 15);
  srawi_( R18, R31, 12);

  clrrdi( R3,  R30, 5);
  clrldi( R9,  R10, 11);

  rldicr( R19, R20, 13, 15);
  rldicr_(R20, R20, 16, 14);
  rldicl( R21, R21, 30, 33);
  rldicl_(R22, R1,  20, 25);
  rlwinm( R23, R2,  25, 10, 11);
  rlwinm_(R24, R3,  12, 13, 14);

  // PPC 1, section 3.3.2 Fixed-Point Load Instructions
  lwzx(   R3,  R5,  R7);
  lwz(    R11,  0,  R1);
  lwzu(   R31, -4,  R11);

  lwax(   R3,  R5,  R7);
  lwa(    R31, -4,  R11);
  lhzx(   R3,  R5,  R7);
  lhz(    R31, -4,  R11);
  lhzu(   R31, -4,  R11);

  lhax(   R3,  R5,  R7);
  lha(    R31, -4,  R11);
  lhau(   R11,  0,  R1);

  lbzx(   R3,  R5,  R7);
  lbz(    R31, -4,  R11);
  lbzu(   R11,  0,  R1);

  ld(     R31, -4,  R11);
  ldx(    R3,  R5,  R7);
  ldu(    R31, -4,  R11);

  // PPC 1, section 3.3.3 Fixed-Point Store Instructions
  stwx(   R3,  R5,  R7);
  stw(    R31, -4,  R11);
  stwu(   R11,  0,  R1);

  sthx(   R3,  R5,  R7 );
  sth(    R31, -4,  R11);
  sthu(   R31, -4,  R11);

  stbx(   R3,  R5,  R7);
  stb(    R31, -4,  R11);
  stbu(   R31, -4,  R11);

  std(    R31, -4,  R11);
  stdx(   R3,  R5,  R7);
  stdu(   R31, -4,  R11);

  // PPC 1, section 3.3.13 Move To/From System Register Instructions
  mtlr(   R3);
  mflr(   R3);
  mtctr(  R3);
  mfctr(  R3);
  mtcrf(  0xff, R15);
  mtcr(   R15);
  mtcrf(  0x03, R15);
  mtcr(   R15);
  mfcr(   R15);

  // PPC 1, section 2.4.1 Branch Instructions
  Label lbl1, lbl2, lbl3;
  bind(lbl1);

  b(pc());
  b(pc() - 8);
  b(lbl1);
  b(lbl2);
  b(lbl3);

  bl(pc() - 8);
  bl(lbl1);
  bl(lbl2);

  bcl(4, 10, pc() - 8);
  bcl(4, 10, lbl1);
  bcl(4, 10, lbl2);

  bclr( 4, 6, 0);
  bclrl(4, 6, 0);

  bind(lbl2);

  bcctr( 4, 6, 0);
  bcctrl(4, 6, 0);

  blt(CCR0, lbl2);
  bgt(CCR1, lbl2);
  beq(CCR2, lbl2);
  bso(CCR3, lbl2);
  bge(CCR4, lbl2);
  ble(CCR5, lbl2);
  bne(CCR6, lbl2);
  bns(CCR7, lbl2);

  bltl(CCR0, lbl2);
  bgtl(CCR1, lbl2);
  beql(CCR2, lbl2);
  bsol(CCR3, lbl2);
  bgel(CCR4, lbl2);
  blel(CCR5, lbl2);
  bnel(CCR6, lbl2);
  bnsl(CCR7, lbl2);
  blr();

  sync();
  icbi( R1, R2);
  dcbst(R2, R3);

  // FLOATING POINT instructions ppc.
  // PPC 1, section 4.6.2 Floating-Point Load Instructions
  lfs( F1, -11, R3);
  lfsu(F2, 123, R4);
  lfsx(F3, R5,  R6);
  lfd( F4, 456, R7);
  lfdu(F5, 789, R8);
  lfdx(F6, R10, R11);

  // PPC 1, section 4.6.3 Floating-Point Store Instructions
  stfs(  F7,  876, R12);
  stfsu( F8,  543, R13);
  stfsx( F9,  R14, R15);
  stfd(  F10, 210, R16);
  stfdu( F11, 111, R17);
  stfdx( F12, R18, R19);

  // PPC 1, section 4.6.4 Floating-Point Move Instructions
  fmr(   F13, F14);
  fmr_(  F14, F15);
  fneg(  F16, F17);
  fneg_( F18, F19);
  fabs(  F20, F21);
  fabs_( F22, F23);
  fnabs( F24, F25);
  fnabs_(F26, F27);

  // PPC 1, section 4.6.5.1 Floating-Point Elementary Arithmetic
  // Instructions
  fadd(  F28, F29, F30);
  fadd_( F31, F0,  F1);
  fadds( F2,  F3,  F4);
  fadds_(F5,  F6,  F7);
  fsub(  F8,  F9,  F10);
  fsub_( F11, F12, F13);
  fsubs( F14, F15, F16);
  fsubs_(F17, F18, F19);
  fmul(  F20, F21, F22);
  fmul_( F23, F24, F25);
  fmuls( F26, F27, F28);
  fmuls_(F29, F30, F31);
  fdiv(  F0,  F1,  F2);
  fdiv_( F3,  F4,  F5);
  fdivs( F6,  F7,  F8);
  fdivs_(F9,  F10, F11);

  // PPC 1, section 4.6.6 Floating-Point Rounding and Conversion
  // Instructions
  frsp(  F12, F13);
  fctid( F14, F15);
  fctidz(F16, F17);
  fctiw( F18, F19);
  fctiwz(F20, F21);
  fcfid( F22, F23);

  // PPC 1, section 4.6.7 Floating-Point Compare Instructions
  fcmpu( CCR7, F24, F25);

  tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", p2i(code()->insts_begin()), p2i(code()->insts_end()));
  code()->decode();
}

#endif // !PRODUCT