// Path: blob/main/contrib/llvm-project/llvm/lib/Target/Sparc/SparcISelLowering.cpp
// 35294 views
//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//1//2// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.3// See https://llvm.org/LICENSE.txt for license information.4// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception5//6//===----------------------------------------------------------------------===//7//8// This file implements the interfaces that Sparc uses to lower LLVM code into a9// selection DAG.10//11//===----------------------------------------------------------------------===//1213#include "SparcISelLowering.h"14#include "MCTargetDesc/SparcMCExpr.h"15#include "MCTargetDesc/SparcMCTargetDesc.h"16#include "SparcMachineFunctionInfo.h"17#include "SparcRegisterInfo.h"18#include "SparcTargetMachine.h"19#include "SparcTargetObjectFile.h"20#include "llvm/ADT/StringExtras.h"21#include "llvm/ADT/StringSwitch.h"22#include "llvm/CodeGen/CallingConvLower.h"23#include "llvm/CodeGen/MachineFrameInfo.h"24#include "llvm/CodeGen/MachineFunction.h"25#include "llvm/CodeGen/MachineInstrBuilder.h"26#include "llvm/CodeGen/MachineRegisterInfo.h"27#include "llvm/CodeGen/SelectionDAG.h"28#include "llvm/CodeGen/SelectionDAGNodes.h"29#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"30#include "llvm/IR/DerivedTypes.h"31#include "llvm/IR/DiagnosticInfo.h"32#include "llvm/IR/Function.h"33#include "llvm/IR/Module.h"34#include "llvm/Support/ErrorHandling.h"35#include "llvm/Support/KnownBits.h"36using namespace llvm;373839//===----------------------------------------------------------------------===//40// Calling Convention Implementation41//===----------------------------------------------------------------------===//4243static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,44MVT &LocVT, CCValAssign::LocInfo &LocInfo,45ISD::ArgFlagsTy &ArgFlags, CCState &State)46{47assert (ArgFlags.isSRet());4849// Assign SRet argument.50State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,510,52LocVT, LocInfo));53return 
true;54}5556static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,57MVT &LocVT, CCValAssign::LocInfo &LocInfo,58ISD::ArgFlagsTy &ArgFlags, CCState &State)59{60static const MCPhysReg RegList[] = {61SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I562};63// Try to get first reg.64if (Register Reg = State.AllocateReg(RegList)) {65State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));66} else {67// Assign whole thing in stack.68State.addLoc(CCValAssign::getCustomMem(69ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));70return true;71}7273// Try to get second reg.74if (Register Reg = State.AllocateReg(RegList))75State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));76else77State.addLoc(CCValAssign::getCustomMem(78ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));79return true;80}8182static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,83MVT &LocVT, CCValAssign::LocInfo &LocInfo,84ISD::ArgFlagsTy &ArgFlags, CCState &State)85{86static const MCPhysReg RegList[] = {87SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I588};8990// Try to get first reg.91if (Register Reg = State.AllocateReg(RegList))92State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));93else94return false;9596// Try to get second reg.97if (Register Reg = State.AllocateReg(RegList))98State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));99else100return false;101102return true;103}104105// Allocate a full-sized argument for the 64-bit ABI.106static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,107MVT &LocVT, CCValAssign::LocInfo &LocInfo,108ISD::ArgFlagsTy &ArgFlags, CCState &State) {109assert((LocVT == MVT::f32 || LocVT == MVT::f128110|| LocVT.getSizeInBits() == 64) &&111"Can't handle non-64 bits locations");112113// Stack space is allocated for all arguments starting from [%fp+BIAS+128].114unsigned size = (LocVT == MVT::f128) ? 
16 : 8;115Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);116unsigned Offset = State.AllocateStack(size, alignment);117unsigned Reg = 0;118119if (LocVT == MVT::i64 && Offset < 6*8)120// Promote integers to %i0-%i5.121Reg = SP::I0 + Offset/8;122else if (LocVT == MVT::f64 && Offset < 16*8)123// Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).124Reg = SP::D0 + Offset/8;125else if (LocVT == MVT::f32 && Offset < 16*8)126// Promote floats to %f1, %f3, ...127Reg = SP::F1 + Offset/4;128else if (LocVT == MVT::f128 && Offset < 16*8)129// Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).130Reg = SP::Q0 + Offset/16;131132// Promote to register when possible, otherwise use the stack slot.133if (Reg) {134State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));135return true;136}137138// Bail out if this is a return CC and we run out of registers to place139// values into.140if (IsReturn)141return false;142143// This argument goes on the stack in an 8-byte slot.144// When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to145// the right-aligned float. 
The first 4 bytes of the stack slot are undefined.146if (LocVT == MVT::f32)147Offset += 4;148149State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));150return true;151}152153// Allocate a half-sized argument for the 64-bit ABI.154//155// This is used when passing { float, int } structs by value in registers.156static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,157MVT &LocVT, CCValAssign::LocInfo &LocInfo,158ISD::ArgFlagsTy &ArgFlags, CCState &State) {159assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");160unsigned Offset = State.AllocateStack(4, Align(4));161162if (LocVT == MVT::f32 && Offset < 16*8) {163// Promote floats to %f0-%f31.164State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,165LocVT, LocInfo));166return true;167}168169if (LocVT == MVT::i32 && Offset < 6*8) {170// Promote integers to %i0-%i5, using half the register.171unsigned Reg = SP::I0 + Offset/8;172LocVT = MVT::i64;173LocInfo = CCValAssign::AExt;174175// Set the Custom bit if this i32 goes in the high bits of a register.176if (Offset % 8 == 0)177State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,178LocVT, LocInfo));179else180State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));181return true;182}183184// Bail out if this is a return CC and we run out of registers to place185// values into.186if (IsReturn)187return false;188189State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));190return true;191}192193static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,194CCValAssign::LocInfo &LocInfo,195ISD::ArgFlagsTy &ArgFlags, CCState &State) {196return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,197State);198}199200static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,201CCValAssign::LocInfo &LocInfo,202ISD::ArgFlagsTy &ArgFlags, CCState &State) {203return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, 
ArgFlags,204State);205}206207static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,208CCValAssign::LocInfo &LocInfo,209ISD::ArgFlagsTy &ArgFlags, CCState &State) {210return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,211State);212}213214static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,215CCValAssign::LocInfo &LocInfo,216ISD::ArgFlagsTy &ArgFlags, CCState &State) {217return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,218State);219}220221#include "SparcGenCallingConv.inc"222223// The calling conventions in SparcCallingConv.td are described in terms of the224// callee's register window. This function translates registers to the225// corresponding caller window %o register.226static unsigned toCallerWindow(unsigned Reg) {227static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,228"Unexpected enum");229if (Reg >= SP::I0 && Reg <= SP::I7)230return Reg - SP::I0 + SP::O0;231return Reg;232}233234bool SparcTargetLowering::CanLowerReturn(235CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,236const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {237SmallVector<CCValAssign, 16> RVLocs;238CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);239return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? 
RetCC_Sparc64240: RetCC_Sparc32);241}242243SDValue244SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,245bool IsVarArg,246const SmallVectorImpl<ISD::OutputArg> &Outs,247const SmallVectorImpl<SDValue> &OutVals,248const SDLoc &DL, SelectionDAG &DAG) const {249if (Subtarget->is64Bit())250return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);251return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);252}253254SDValue255SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,256bool IsVarArg,257const SmallVectorImpl<ISD::OutputArg> &Outs,258const SmallVectorImpl<SDValue> &OutVals,259const SDLoc &DL, SelectionDAG &DAG) const {260MachineFunction &MF = DAG.getMachineFunction();261262// CCValAssign - represent the assignment of the return value to locations.263SmallVector<CCValAssign, 16> RVLocs;264265// CCState - Info about the registers and stack slot.266CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,267*DAG.getContext());268269// Analyze return values.270CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);271272SDValue Glue;273SmallVector<SDValue, 4> RetOps(1, Chain);274// Make room for the return address offset.275RetOps.push_back(SDValue());276277// Copy the result values into the output registers.278for (unsigned i = 0, realRVLocIdx = 0;279i != RVLocs.size();280++i, ++realRVLocIdx) {281CCValAssign &VA = RVLocs[i];282assert(VA.isRegLoc() && "Can only return in registers!");283284SDValue Arg = OutVals[realRVLocIdx];285286if (VA.needsCustom()) {287assert(VA.getLocVT() == MVT::v2i32);288// Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would289// happen by default if this wasn't a legal type)290291SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,292Arg,293DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));294SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,295Arg,296DAG.getConstant(1, DL, 
getVectorIdxTy(DAG.getDataLayout())));297298Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Glue);299Glue = Chain.getValue(1);300RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));301VA = RVLocs[++i]; // skip ahead to next loc302Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,303Glue);304} else305Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Glue);306307// Guarantee that all emitted copies are stuck together with flags.308Glue = Chain.getValue(1);309RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));310}311312unsigned RetAddrOffset = 8; // Call Inst + Delay Slot313// If the function returns a struct, copy the SRetReturnReg to I0314if (MF.getFunction().hasStructRetAttr()) {315SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();316Register Reg = SFI->getSRetReturnReg();317if (!Reg)318llvm_unreachable("sret virtual register not created in the entry block");319auto PtrVT = getPointerTy(DAG.getDataLayout());320SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);321Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Glue);322Glue = Chain.getValue(1);323RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));324RetAddrOffset = 12; // CallInst + Delay Slot + Unimp325}326327RetOps[0] = Chain; // Update chain.328RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);329330// Add the glue if we have it.331if (Glue.getNode())332RetOps.push_back(Glue);333334return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);335}336337// Lower return values for the 64-bit ABI.338// Return values are passed the exactly the same way as function arguments.339SDValue340SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,341bool IsVarArg,342const SmallVectorImpl<ISD::OutputArg> &Outs,343const SmallVectorImpl<SDValue> &OutVals,344const SDLoc &DL, SelectionDAG &DAG) const {345// CCValAssign - represent the assignment of the return value to locations.346SmallVector<CCValAssign, 16> RVLocs;347348// 
CCState - Info about the registers and stack slot.349CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,350*DAG.getContext());351352// Analyze return values.353CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);354355SDValue Glue;356SmallVector<SDValue, 4> RetOps(1, Chain);357358// The second operand on the return instruction is the return address offset.359// The return address is always %i7+8 with the 64-bit ABI.360RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));361362// Copy the result values into the output registers.363for (unsigned i = 0; i != RVLocs.size(); ++i) {364CCValAssign &VA = RVLocs[i];365assert(VA.isRegLoc() && "Can only return in registers!");366SDValue OutVal = OutVals[i];367368// Integer return values must be sign or zero extended by the callee.369switch (VA.getLocInfo()) {370case CCValAssign::Full: break;371case CCValAssign::SExt:372OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);373break;374case CCValAssign::ZExt:375OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);376break;377case CCValAssign::AExt:378OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);379break;380default:381llvm_unreachable("Unknown loc info!");382}383384// The custom bit on an i32 return value indicates that it should be passed385// in the high bits of the register.386if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {387OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,388DAG.getConstant(32, DL, MVT::i32));389390// The next value may go in the low bits of the same register.391// Handle both at once.392if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {393SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);394OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);395// Skip the next value, it's already done.396++i;397}398}399400Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);401402// Guarantee that all emitted copies are stuck together with flags.403Glue = 
Chain.getValue(1);404RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));405}406407RetOps[0] = Chain; // Update chain.408409// Add the flag if we have it.410if (Glue.getNode())411RetOps.push_back(Glue);412413return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);414}415416SDValue SparcTargetLowering::LowerFormalArguments(417SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,418const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,419SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {420if (Subtarget->is64Bit())421return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,422DL, DAG, InVals);423return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,424DL, DAG, InVals);425}426427/// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are428/// passed in either one or two GPRs, including FP values. TODO: we should429/// pass FP values in FP registers for fastcc functions.430SDValue SparcTargetLowering::LowerFormalArguments_32(431SDValue Chain, CallingConv::ID CallConv, bool isVarArg,432const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,433SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {434MachineFunction &MF = DAG.getMachineFunction();435MachineRegisterInfo &RegInfo = MF.getRegInfo();436SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();437438// Assign locations to all of the incoming arguments.439SmallVector<CCValAssign, 16> ArgLocs;440CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,441*DAG.getContext());442CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);443444const unsigned StackOffset = 92;445bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();446447unsigned InIdx = 0;448for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {449CCValAssign &VA = ArgLocs[i];450451if (Ins[InIdx].Flags.isSRet()) {452if (InIdx != 0)453report_fatal_error("sparc only supports sret on the first parameter");454// Get SRet from [%fp+64].455int 
FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);456SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);457SDValue Arg =458DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());459InVals.push_back(Arg);460continue;461}462463if (VA.isRegLoc()) {464if (VA.needsCustom()) {465assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);466467Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);468MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);469SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);470471assert(i+1 < e);472CCValAssign &NextVA = ArgLocs[++i];473474SDValue LoVal;475if (NextVA.isMemLoc()) {476int FrameIdx = MF.getFrameInfo().477CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);478SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);479LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());480} else {481Register loReg = MF.addLiveIn(NextVA.getLocReg(),482&SP::IntRegsRegClass);483LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);484}485486if (IsLittleEndian)487std::swap(LoVal, HiVal);488489SDValue WholeValue =490DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);491WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);492InVals.push_back(WholeValue);493continue;494}495Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);496MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);497SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);498if (VA.getLocVT() == MVT::f32)499Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);500else if (VA.getLocVT() != MVT::i32) {501Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,502DAG.getValueType(VA.getLocVT()));503Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);504}505InVals.push_back(Arg);506continue;507}508509assert(VA.isMemLoc());510511unsigned Offset = VA.getLocMemOffset()+StackOffset;512auto PtrVT = getPointerTy(DAG.getDataLayout());513514if (VA.needsCustom()) 
{515assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);516// If it is double-word aligned, just load.517if (Offset % 8 == 0) {518int FI = MF.getFrameInfo().CreateFixedObject(8,519Offset,520true);521SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);522SDValue Load =523DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());524InVals.push_back(Load);525continue;526}527528int FI = MF.getFrameInfo().CreateFixedObject(4,529Offset,530true);531SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);532SDValue HiVal =533DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());534int FI2 = MF.getFrameInfo().CreateFixedObject(4,535Offset+4,536true);537SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);538539SDValue LoVal =540DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());541542if (IsLittleEndian)543std::swap(LoVal, HiVal);544545SDValue WholeValue =546DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);547WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);548InVals.push_back(WholeValue);549continue;550}551552int FI = MF.getFrameInfo().CreateFixedObject(4,553Offset,554true);555SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);556SDValue Load ;557if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {558Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());559} else if (VA.getValVT() == MVT::f128) {560report_fatal_error("SPARCv8 does not handle f128 in calls; "561"pass indirectly");562} else {563// We shouldn't see any other value types here.564llvm_unreachable("Unexpected ValVT encountered in frame lowering.");565}566InVals.push_back(Load);567}568569if (MF.getFunction().hasStructRetAttr()) {570// Copy the SRet Argument to SRetReturnReg.571SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();572Register Reg = SFI->getSRetReturnReg();573if (!Reg) {574Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);575SFI->setSRetReturnReg(Reg);576}577SDValue Copy = 
DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);578Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);579}580581// Store remaining ArgRegs to the stack if this is a varargs function.582if (isVarArg) {583static const MCPhysReg ArgRegs[] = {584SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5585};586unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);587const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;588unsigned ArgOffset = CCInfo.getStackSize();589if (NumAllocated == 6)590ArgOffset += StackOffset;591else {592assert(!ArgOffset);593ArgOffset = 68+4*NumAllocated;594}595596// Remember the vararg offset for the va_start implementation.597FuncInfo->setVarArgsFrameOffset(ArgOffset);598599std::vector<SDValue> OutChains;600601for (; CurArgReg != ArgRegEnd; ++CurArgReg) {602Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);603MF.getRegInfo().addLiveIn(*CurArgReg, VReg);604SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);605606int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,607true);608SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);609610OutChains.push_back(611DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));612ArgOffset += 4;613}614615if (!OutChains.empty()) {616OutChains.push_back(Chain);617Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);618}619}620621return Chain;622}623624// Lower formal arguments for the 64 bit ABI.625SDValue SparcTargetLowering::LowerFormalArguments_64(626SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,627const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,628SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {629MachineFunction &MF = DAG.getMachineFunction();630631// Analyze arguments according to CC_Sparc64.632SmallVector<CCValAssign, 16> ArgLocs;633CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,634*DAG.getContext());635CCInfo.AnalyzeFormalArguments(Ins, 
CC_Sparc64);636637// The argument array begins at %fp+BIAS+128, after the register save area.638const unsigned ArgArea = 128;639640for (const CCValAssign &VA : ArgLocs) {641if (VA.isRegLoc()) {642// This argument is passed in a register.643// All integer register arguments are promoted by the caller to i64.644645// Create a virtual register for the promoted live-in value.646Register VReg = MF.addLiveIn(VA.getLocReg(),647getRegClassFor(VA.getLocVT()));648SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());649650// Get the high bits for i32 struct elements.651if (VA.getValVT() == MVT::i32 && VA.needsCustom())652Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,653DAG.getConstant(32, DL, MVT::i32));654655// The caller promoted the argument, so insert an Assert?ext SDNode so we656// won't promote the value again in this function.657switch (VA.getLocInfo()) {658case CCValAssign::SExt:659Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,660DAG.getValueType(VA.getValVT()));661break;662case CCValAssign::ZExt:663Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,664DAG.getValueType(VA.getValVT()));665break;666default:667break;668}669670// Truncate the register down to the argument type.671if (VA.isExtInLoc())672Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);673674InVals.push_back(Arg);675continue;676}677678// The registers are exhausted. 
This argument was passed on the stack.679assert(VA.isMemLoc());680// The CC_Sparc64_Full/Half functions compute stack offsets relative to the681// beginning of the arguments area at %fp+BIAS+128.682unsigned Offset = VA.getLocMemOffset() + ArgArea;683unsigned ValSize = VA.getValVT().getSizeInBits() / 8;684// Adjust offset for extended arguments, SPARC is big-endian.685// The caller will have written the full slot with extended bytes, but we686// prefer our own extending loads.687if (VA.isExtInLoc())688Offset += 8 - ValSize;689int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);690InVals.push_back(691DAG.getLoad(VA.getValVT(), DL, Chain,692DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),693MachinePointerInfo::getFixedStack(MF, FI)));694}695696if (!IsVarArg)697return Chain;698699// This function takes variable arguments, some of which may have been passed700// in registers %i0-%i5. Variable floating point arguments are never passed701// in floating point registers. They go on %i0-%i5 or on the stack like702// integer arguments.703//704// The va_start intrinsic needs to know the offset to the first variable705// argument.706unsigned ArgOffset = CCInfo.getStackSize();707SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();708// Skip the 128 bytes of register save area.709FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +710Subtarget->getStackPointerBias());711712// Save the variable arguments that were passed in registers.713// The caller is required to reserve stack space for 6 arguments regardless714// of how many arguments were actually passed.715SmallVector<SDValue, 8> OutChains;716for (; ArgOffset < 6*8; ArgOffset += 8) {717Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);718SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);719int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);720auto PtrVT = 
getPointerTy(MF.getDataLayout());721OutChains.push_back(722DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),723MachinePointerInfo::getFixedStack(MF, FI)));724}725726if (!OutChains.empty())727Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);728729return Chain;730}731732// Check whether any of the argument registers are reserved733static bool isAnyArgRegReserved(const SparcRegisterInfo *TRI,734const MachineFunction &MF) {735// The register window design means that outgoing parameters at O*736// will appear in the callee as I*.737// Be conservative and check both sides of the register names.738bool Outgoing =739llvm::any_of(SP::GPROutgoingArgRegClass, [TRI, &MF](MCPhysReg r) {740return TRI->isReservedReg(MF, r);741});742bool Incoming =743llvm::any_of(SP::GPRIncomingArgRegClass, [TRI, &MF](MCPhysReg r) {744return TRI->isReservedReg(MF, r);745});746return Outgoing || Incoming;747}748749static void emitReservedArgRegCallError(const MachineFunction &MF) {750const Function &F = MF.getFunction();751F.getContext().diagnose(DiagnosticInfoUnsupported{752F, ("SPARC doesn't support"753" function calls if any of the argument registers is reserved.")});754}755756SDValue757SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,758SmallVectorImpl<SDValue> &InVals) const {759if (Subtarget->is64Bit())760return LowerCall_64(CLI, InVals);761return LowerCall_32(CLI, InVals);762}763764static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,765const CallBase *Call) {766if (Call)767return Call->hasFnAttr(Attribute::ReturnsTwice);768769const Function *CalleeFn = nullptr;770if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {771CalleeFn = dyn_cast<Function>(G->getGlobal());772} else if (ExternalSymbolSDNode *E =773dyn_cast<ExternalSymbolSDNode>(Callee)) {774const Function &Fn = DAG.getMachineFunction().getFunction();775const Module *M = Fn.getParent();776const char *CalleeName = E->getSymbol();777CalleeFn = 
M->getFunction(CalleeName);778}779780if (!CalleeFn)781return false;782return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);783}784785/// IsEligibleForTailCallOptimization - Check whether the call is eligible786/// for tail call optimization.787bool SparcTargetLowering::IsEligibleForTailCallOptimization(788CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {789790auto &Outs = CLI.Outs;791auto &Caller = MF.getFunction();792793// Do not tail call opt functions with "disable-tail-calls" attribute.794if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")795return false;796797// Do not tail call opt if the stack is used to pass parameters.798// 64-bit targets have a slightly higher limit since the ABI requires799// to allocate some space even when all the parameters fit inside registers.800unsigned StackSizeLimit = Subtarget->is64Bit() ? 48 : 0;801if (CCInfo.getStackSize() > StackSizeLimit)802return false;803804// Do not tail call opt if either the callee or caller returns805// a struct and the other does not.806if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())807return false;808809// Byval parameters hand the function a pointer directly into the stack area810// we want to reuse during a tail call.811for (auto &Arg : Outs)812if (Arg.Flags.isByVal())813return false;814815return true;816}817818// Lower a call for the 32-bit ABI.819SDValue820SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,821SmallVectorImpl<SDValue> &InVals) const {822SelectionDAG &DAG = CLI.DAG;823SDLoc &dl = CLI.DL;824SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;825SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;826SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;827SDValue Chain = CLI.Chain;828SDValue Callee = CLI.Callee;829bool &isTailCall = CLI.IsTailCall;830CallingConv::ID CallConv = CLI.CallConv;831bool isVarArg = CLI.IsVarArg;832MachineFunction &MF = DAG.getMachineFunction();833834// Analyze operands of the 
call, assigning locations to each operand.835SmallVector<CCValAssign, 16> ArgLocs;836CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,837*DAG.getContext());838CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);839840isTailCall = isTailCall && IsEligibleForTailCallOptimization(841CCInfo, CLI, DAG.getMachineFunction());842843// Get the size of the outgoing arguments stack space requirement.844unsigned ArgsSize = CCInfo.getStackSize();845846// Keep stack frames 8-byte aligned.847ArgsSize = (ArgsSize+7) & ~7;848849MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();850851// Create local copies for byval args.852SmallVector<SDValue, 8> ByValArgs;853for (unsigned i = 0, e = Outs.size(); i != e; ++i) {854ISD::ArgFlagsTy Flags = Outs[i].Flags;855if (!Flags.isByVal())856continue;857858SDValue Arg = OutVals[i];859unsigned Size = Flags.getByValSize();860Align Alignment = Flags.getNonZeroByValAlign();861862if (Size > 0U) {863int FI = MFI.CreateStackObject(Size, Alignment, false);864SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));865SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);866867Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,868false, // isVolatile,869(Size <= 32), // AlwaysInline if size <= 32,870/*CI=*/nullptr, std::nullopt, MachinePointerInfo(),871MachinePointerInfo());872ByValArgs.push_back(FIPtr);873}874else {875SDValue nullVal;876ByValArgs.push_back(nullVal);877}878}879880assert(!isTailCall || ArgsSize == 0);881882if (!isTailCall)883Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);884885SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;886SmallVector<SDValue, 8> MemOpChains;887888const unsigned StackOffset = 92;889bool hasStructRetAttr = false;890unsigned SRetArgSize = 0;891// Walk the register/memloc assignments, inserting copies/loads.892for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();893i != e;894++i, ++realArgIdx) {895CCValAssign &VA = 
ArgLocs[i];896SDValue Arg = OutVals[realArgIdx];897898ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;899900// Use local copy if it is a byval arg.901if (Flags.isByVal()) {902Arg = ByValArgs[byvalArgIdx++];903if (!Arg) {904continue;905}906}907908// Promote the value if needed.909switch (VA.getLocInfo()) {910default: llvm_unreachable("Unknown loc info!");911case CCValAssign::Full: break;912case CCValAssign::SExt:913Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);914break;915case CCValAssign::ZExt:916Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);917break;918case CCValAssign::AExt:919Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);920break;921case CCValAssign::BCvt:922Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);923break;924}925926if (Flags.isSRet()) {927assert(VA.needsCustom());928929if (isTailCall)930continue;931932// store SRet argument in %sp+64933SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);934SDValue PtrOff = DAG.getIntPtrConstant(64, dl);935PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);936MemOpChains.push_back(937DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));938hasStructRetAttr = true;939// sret only allowed on first argument940assert(Outs[realArgIdx].OrigArgIndex == 0);941SRetArgSize =942DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);943continue;944}945946if (VA.needsCustom()) {947assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);948949if (VA.isMemLoc()) {950unsigned Offset = VA.getLocMemOffset() + StackOffset;951// if it is double-word aligned, just store.952if (Offset % 8 == 0) {953SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);954SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);955PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);956MemOpChains.push_back(957DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));958continue;959}960}961962if (VA.getLocVT() == MVT::f64) {963// Move from the float value from float 
registers into the964// integer registers.965if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))966Arg = bitcastConstantFPToInt(C, dl, DAG);967else968Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);969}970971SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,972Arg,973DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));974SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,975Arg,976DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));977978if (VA.isRegLoc()) {979RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));980assert(i+1 != e);981CCValAssign &NextVA = ArgLocs[++i];982if (NextVA.isRegLoc()) {983RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));984} else {985// Store the second part in stack.986unsigned Offset = NextVA.getLocMemOffset() + StackOffset;987SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);988SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);989PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);990MemOpChains.push_back(991DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));992}993} else {994unsigned Offset = VA.getLocMemOffset() + StackOffset;995// Store the first part.996SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);997SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);998PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);999MemOpChains.push_back(1000DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));1001// Store the second part.1002PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);1003PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);1004MemOpChains.push_back(1005DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));1006}1007continue;1008}10091010// Arguments that can be passed on register must be kept at1011// RegsToPass vector1012if (VA.isRegLoc()) {1013if (VA.getLocVT() != MVT::f32) {1014RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));1015continue;1016}1017Arg = 
DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);1018RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));1019continue;1020}10211022assert(VA.isMemLoc());10231024// Create a store off the stack pointer for this argument.1025SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);1026SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,1027dl);1028PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);1029MemOpChains.push_back(1030DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));1031}103210331034// Emit all stores, make sure the occur before any copies into physregs.1035if (!MemOpChains.empty())1036Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);10371038// Build a sequence of copy-to-reg nodes chained together with token1039// chain and flag operands which copy the outgoing args into registers.1040// The InGlue in necessary since all emitted instructions must be1041// stuck together.1042SDValue InGlue;1043for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {1044Register Reg = RegsToPass[i].first;1045if (!isTailCall)1046Reg = toCallerWindow(Reg);1047Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InGlue);1048InGlue = Chain.getValue(1);1049}10501051bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);10521053// If the callee is a GlobalAddress node (quite common, every direct call is)1054// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.1055// Likewise ExternalSymbol -> TargetExternalSymbol.1056unsigned TF = isPositionIndependent() ? 
SparcMCExpr::VK_Sparc_WPLT301057: SparcMCExpr::VK_Sparc_WDISP30;1058if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))1059Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);1060else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))1061Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);10621063// Returns a chain & a flag for retval copy to use1064SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);1065SmallVector<SDValue, 8> Ops;1066Ops.push_back(Chain);1067Ops.push_back(Callee);1068if (hasStructRetAttr)1069Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));1070for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {1071Register Reg = RegsToPass[i].first;1072if (!isTailCall)1073Reg = toCallerWindow(Reg);1074Ops.push_back(DAG.getRegister(Reg, RegsToPass[i].second.getValueType()));1075}10761077// Add a register mask operand representing the call-preserved registers.1078const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();1079const uint32_t *Mask =1080((hasReturnsTwice)1081? 
TRI->getRTCallPreservedMask(CallConv)1082: TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));10831084if (isAnyArgRegReserved(TRI, MF))1085emitReservedArgRegCallError(MF);10861087assert(Mask && "Missing call preserved mask for calling convention");1088Ops.push_back(DAG.getRegisterMask(Mask));10891090if (InGlue.getNode())1091Ops.push_back(InGlue);10921093if (isTailCall) {1094DAG.getMachineFunction().getFrameInfo().setHasTailCall();1095return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);1096}10971098Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);1099InGlue = Chain.getValue(1);11001101Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, dl);1102InGlue = Chain.getValue(1);11031104// Assign locations to each value returned by this call.1105SmallVector<CCValAssign, 16> RVLocs;1106CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,1107*DAG.getContext());11081109RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);11101111// Copy all of the result registers out of their specified physreg.1112for (unsigned i = 0; i != RVLocs.size(); ++i) {1113assert(RVLocs[i].isRegLoc() && "Can only return in registers!");1114if (RVLocs[i].getLocVT() == MVT::v2i32) {1115SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);1116SDValue Lo = DAG.getCopyFromReg(1117Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InGlue);1118Chain = Lo.getValue(1);1119InGlue = Lo.getValue(2);1120Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,1121DAG.getConstant(0, dl, MVT::i32));1122SDValue Hi = DAG.getCopyFromReg(1123Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InGlue);1124Chain = Hi.getValue(1);1125InGlue = Hi.getValue(2);1126Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,1127DAG.getConstant(1, dl, MVT::i32));1128InVals.push_back(Vec);1129} else {1130Chain =1131DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),1132RVLocs[i].getValVT(), InGlue)1133.getValue(1);1134InGlue = 
Chain.getValue(2);1135InVals.push_back(Chain.getValue(0));1136}1137}11381139return Chain;1140}11411142// FIXME? Maybe this could be a TableGen attribute on some registers and1143// this table could be generated automatically from RegInfo.1144Register SparcTargetLowering::getRegisterByName(const char* RegName, LLT VT,1145const MachineFunction &MF) const {1146Register Reg = StringSwitch<Register>(RegName)1147.Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)1148.Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)1149.Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)1150.Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)1151.Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)1152.Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)1153.Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)1154.Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)1155.Default(0);11561157// If we're directly referencing register names1158// (e.g in GCC C extension `register int r asm("g1");`),1159// make sure that said register is in the reserve list.1160const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();1161if (!TRI->isReservedReg(MF, Reg))1162Reg = 0;11631164if (Reg)1165return Reg;11661167report_fatal_error("Invalid register name global variable");1168}11691170// Fixup floating point arguments in the ... part of a varargs call.1171//1172// The SPARC v9 ABI requires that floating point arguments are treated the same1173// as integers when calling a varargs function. 
This does not apply to the1174// fixed arguments that are part of the function's prototype.1175//1176// This function post-processes a CCValAssign array created by1177// AnalyzeCallOperands().1178static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,1179ArrayRef<ISD::OutputArg> Outs) {1180for (CCValAssign &VA : ArgLocs) {1181MVT ValTy = VA.getLocVT();1182// FIXME: What about f32 arguments? C promotes them to f64 when calling1183// varargs functions.1184if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))1185continue;1186// The fixed arguments to a varargs function still go in FP registers.1187if (Outs[VA.getValNo()].IsFixed)1188continue;11891190// This floating point argument should be reassigned.1191// Determine the offset into the argument array.1192Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;1193unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;1194unsigned Offset = argSize * (VA.getLocReg() - firstReg);1195assert(Offset < 16*8 && "Offset out of range, bad register enum?");11961197if (Offset < 6*8) {1198// This argument should go in %i0-%i5.1199unsigned IReg = SP::I0 + Offset/8;1200if (ValTy == MVT::f64)1201// Full register, just bitconvert into i64.1202VA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), IReg, MVT::i64,1203CCValAssign::BCvt);1204else {1205assert(ValTy == MVT::f128 && "Unexpected type!");1206// Full register, just bitconvert into i128 -- We will lower this into1207// two i64s in LowerCall_64.1208VA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), IReg,1209MVT::i128, CCValAssign::BCvt);1210}1211} else {1212// This needs to go to memory, we're out of integer registers.1213VA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), Offset,1214VA.getLocVT(), VA.getLocInfo());1215}1216}1217}12181219// Lower a call for the 64-bit ABI.1220SDValue1221SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,1222SmallVectorImpl<SDValue> &InVals) const {1223SelectionDAG &DAG = 
CLI.DAG;1224SDLoc DL = CLI.DL;1225SDValue Chain = CLI.Chain;1226auto PtrVT = getPointerTy(DAG.getDataLayout());1227MachineFunction &MF = DAG.getMachineFunction();12281229// Analyze operands of the call, assigning locations to each operand.1230SmallVector<CCValAssign, 16> ArgLocs;1231CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,1232*DAG.getContext());1233CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);12341235CLI.IsTailCall = CLI.IsTailCall && IsEligibleForTailCallOptimization(1236CCInfo, CLI, DAG.getMachineFunction());12371238// Get the size of the outgoing arguments stack space requirement.1239// The stack offset computed by CC_Sparc64 includes all arguments.1240// Called functions expect 6 argument words to exist in the stack frame, used1241// or not.1242unsigned StackReserved = 6 * 8u;1243unsigned ArgsSize = std::max<unsigned>(StackReserved, CCInfo.getStackSize());12441245// Keep stack frames 16-byte aligned.1246ArgsSize = alignTo(ArgsSize, 16);12471248// Varargs calls require special treatment.1249if (CLI.IsVarArg)1250fixupVariableFloatArgs(ArgLocs, CLI.Outs);12511252assert(!CLI.IsTailCall || ArgsSize == StackReserved);12531254// Adjust the stack pointer to make room for the arguments.1255// FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls1256// with more than 6 arguments.1257if (!CLI.IsTailCall)1258Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);12591260// Collect the set of registers to pass to the function and their values.1261// This will be emitted as a sequence of CopyToReg nodes glued to the call1262// instruction.1263SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;12641265// Collect chains from all the memory opeations that copy arguments to the1266// stack. 
They must follow the stack pointer adjustment above and precede the1267// call instruction itself.1268SmallVector<SDValue, 8> MemOpChains;12691270for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {1271const CCValAssign &VA = ArgLocs[i];1272SDValue Arg = CLI.OutVals[i];12731274// Promote the value if needed.1275switch (VA.getLocInfo()) {1276default:1277llvm_unreachable("Unknown location info!");1278case CCValAssign::Full:1279break;1280case CCValAssign::SExt:1281Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);1282break;1283case CCValAssign::ZExt:1284Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);1285break;1286case CCValAssign::AExt:1287Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);1288break;1289case CCValAssign::BCvt:1290// fixupVariableFloatArgs() may create bitcasts from f128 to i128. But1291// SPARC does not support i128 natively. Lower it into two i64, see below.1292if (!VA.needsCustom() || VA.getValVT() != MVT::f1281293|| VA.getLocVT() != MVT::i128)1294Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);1295break;1296}12971298if (VA.isRegLoc()) {1299if (VA.needsCustom() && VA.getValVT() == MVT::f1281300&& VA.getLocVT() == MVT::i128) {1301// Store and reload into the integer register reg and reg+1.1302unsigned Offset = 8 * (VA.getLocReg() - SP::I0);1303unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;1304SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);1305SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);1306HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);1307SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);1308LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);13091310// Store to %sp+BIAS+128+Offset1311SDValue Store =1312DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());1313// Load into Reg and Reg+11314SDValue Hi64 =1315DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());1316SDValue Lo64 =1317DAG.getLoad(MVT::i64, DL, 
Store, LoPtrOff, MachinePointerInfo());13181319Register HiReg = VA.getLocReg();1320Register LoReg = VA.getLocReg() + 1;1321if (!CLI.IsTailCall) {1322HiReg = toCallerWindow(HiReg);1323LoReg = toCallerWindow(LoReg);1324}13251326RegsToPass.push_back(std::make_pair(HiReg, Hi64));1327RegsToPass.push_back(std::make_pair(LoReg, Lo64));1328continue;1329}13301331// The custom bit on an i32 return value indicates that it should be1332// passed in the high bits of the register.1333if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {1334Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,1335DAG.getConstant(32, DL, MVT::i32));13361337// The next value may go in the low bits of the same register.1338// Handle both at once.1339if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&1340ArgLocs[i+1].getLocReg() == VA.getLocReg()) {1341SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,1342CLI.OutVals[i+1]);1343Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);1344// Skip the next value, it's already done.1345++i;1346}1347}13481349Register Reg = VA.getLocReg();1350if (!CLI.IsTailCall)1351Reg = toCallerWindow(Reg);1352RegsToPass.push_back(std::make_pair(Reg, Arg));1353continue;1354}13551356assert(VA.isMemLoc());13571358// Create a store off the stack pointer for this argument.1359SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);1360// The argument area starts at %fp+BIAS+128 in the callee frame,1361// %sp+BIAS+128 in ours.1362SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +1363Subtarget->getStackPointerBias() +1364128, DL);1365PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);1366MemOpChains.push_back(1367DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));1368}13691370// Emit all stores, make sure they occur before the call.1371if (!MemOpChains.empty())1372Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);13731374// Build a sequence of CopyToReg nodes glued together with token chain and1375// glue operands which copy the outgoing args 
into registers. The InGlue is1376// necessary since all emitted instructions must be stuck together in order1377// to pass the live physical registers.1378SDValue InGlue;1379for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {1380Chain = DAG.getCopyToReg(Chain, DL,1381RegsToPass[i].first, RegsToPass[i].second, InGlue);1382InGlue = Chain.getValue(1);1383}13841385// If the callee is a GlobalAddress node (quite common, every direct call is)1386// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.1387// Likewise ExternalSymbol -> TargetExternalSymbol.1388SDValue Callee = CLI.Callee;1389bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);1390unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT301391: SparcMCExpr::VK_Sparc_WDISP30;1392if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))1393Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);1394else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))1395Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);13961397// Build the operands for the call instruction itself.1398SmallVector<SDValue, 8> Ops;1399Ops.push_back(Chain);1400Ops.push_back(Callee);1401for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)1402Ops.push_back(DAG.getRegister(RegsToPass[i].first,1403RegsToPass[i].second.getValueType()));14041405// Add a register mask operand representing the call-preserved registers.1406const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();1407const uint32_t *Mask =1408((hasReturnsTwice) ? 
TRI->getRTCallPreservedMask(CLI.CallConv)1409: TRI->getCallPreservedMask(DAG.getMachineFunction(),1410CLI.CallConv));14111412if (isAnyArgRegReserved(TRI, MF))1413emitReservedArgRegCallError(MF);14141415assert(Mask && "Missing call preserved mask for calling convention");1416Ops.push_back(DAG.getRegisterMask(Mask));14171418// Make sure the CopyToReg nodes are glued to the call instruction which1419// consumes the registers.1420if (InGlue.getNode())1421Ops.push_back(InGlue);14221423// Now the call itself.1424if (CLI.IsTailCall) {1425DAG.getMachineFunction().getFrameInfo().setHasTailCall();1426return DAG.getNode(SPISD::TAIL_CALL, DL, MVT::Other, Ops);1427}1428SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);1429Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);1430InGlue = Chain.getValue(1);14311432// Revert the stack pointer immediately after the call.1433Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, DL);1434InGlue = Chain.getValue(1);14351436// Now extract the return values. This is more or less the same as1437// LowerFormalArguments_64.14381439// Assign locations to each value returned by this call.1440SmallVector<CCValAssign, 16> RVLocs;1441CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,1442*DAG.getContext());14431444// Set inreg flag manually for codegen generated library calls that1445// return float.1446if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)1447CLI.Ins[0].Flags.setInReg();14481449RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);14501451// Copy all of the result registers out of their specified physreg.1452for (unsigned i = 0; i != RVLocs.size(); ++i) {1453CCValAssign &VA = RVLocs[i];1454assert(VA.isRegLoc() && "Can only return in registers!");1455unsigned Reg = toCallerWindow(VA.getLocReg());14561457// When returning 'inreg {i32, i32 }', two consecutive i32 arguments can1458// reside in the same register in the high and low bits. 
Reuse the1459// CopyFromReg previous node to avoid duplicate copies.1460SDValue RV;1461if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))1462if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)1463RV = Chain.getValue(0);14641465// But usually we'll create a new CopyFromReg for a different register.1466if (!RV.getNode()) {1467RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);1468Chain = RV.getValue(1);1469InGlue = Chain.getValue(2);1470}14711472// Get the high bits for i32 struct elements.1473if (VA.getValVT() == MVT::i32 && VA.needsCustom())1474RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,1475DAG.getConstant(32, DL, MVT::i32));14761477// The callee promoted the return value, so insert an Assert?ext SDNode so1478// we won't promote the value again in this function.1479switch (VA.getLocInfo()) {1480case CCValAssign::SExt:1481RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,1482DAG.getValueType(VA.getValVT()));1483break;1484case CCValAssign::ZExt:1485RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,1486DAG.getValueType(VA.getValVT()));1487break;1488default:1489break;1490}14911492// Truncate the register down to the return value type.1493if (VA.isExtInLoc())1494RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);14951496InVals.push_back(RV);1497}14981499return Chain;1500}15011502//===----------------------------------------------------------------------===//1503// TargetLowering Implementation1504//===----------------------------------------------------------------------===//15051506TargetLowering::AtomicExpansionKind SparcTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {1507if (AI->getOperation() == AtomicRMWInst::Xchg &&1508AI->getType()->getPrimitiveSizeInBits() == 32)1509return AtomicExpansionKind::None; // Uses xchg instruction15101511return AtomicExpansionKind::CmpXChg;1512}15131514/// intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC1515/// 
rcond condition.1516static SPCC::CondCodes intCondCCodeToRcond(ISD::CondCode CC) {1517switch (CC) {1518default:1519llvm_unreachable("Unknown/unsigned integer condition code!");1520case ISD::SETEQ:1521return SPCC::REG_Z;1522case ISD::SETNE:1523return SPCC::REG_NZ;1524case ISD::SETLT:1525return SPCC::REG_LZ;1526case ISD::SETGT:1527return SPCC::REG_GZ;1528case ISD::SETLE:1529return SPCC::REG_LEZ;1530case ISD::SETGE:1531return SPCC::REG_GEZ;1532}1533}15341535/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC1536/// condition.1537static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {1538switch (CC) {1539default: llvm_unreachable("Unknown integer condition code!");1540case ISD::SETEQ: return SPCC::ICC_E;1541case ISD::SETNE: return SPCC::ICC_NE;1542case ISD::SETLT: return SPCC::ICC_L;1543case ISD::SETGT: return SPCC::ICC_G;1544case ISD::SETLE: return SPCC::ICC_LE;1545case ISD::SETGE: return SPCC::ICC_GE;1546case ISD::SETULT: return SPCC::ICC_CS;1547case ISD::SETULE: return SPCC::ICC_LEU;1548case ISD::SETUGT: return SPCC::ICC_GU;1549case ISD::SETUGE: return SPCC::ICC_CC;1550}1551}15521553/// FPCondCCodeToFCC - Convert a DAG floatingp oint condition code to a SPARC1554/// FCC condition.1555static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {1556switch (CC) {1557default: llvm_unreachable("Unknown fp condition code!");1558case ISD::SETEQ:1559case ISD::SETOEQ: return SPCC::FCC_E;1560case ISD::SETNE:1561case ISD::SETUNE: return SPCC::FCC_NE;1562case ISD::SETLT:1563case ISD::SETOLT: return SPCC::FCC_L;1564case ISD::SETGT:1565case ISD::SETOGT: return SPCC::FCC_G;1566case ISD::SETLE:1567case ISD::SETOLE: return SPCC::FCC_LE;1568case ISD::SETGE:1569case ISD::SETOGE: return SPCC::FCC_GE;1570case ISD::SETULT: return SPCC::FCC_UL;1571case ISD::SETULE: return SPCC::FCC_ULE;1572case ISD::SETUGT: return SPCC::FCC_UG;1573case ISD::SETUGE: return SPCC::FCC_UGE;1574case ISD::SETUO: return SPCC::FCC_U;1575case ISD::SETO: return SPCC::FCC_O;1576case 
ISD::SETONE: return SPCC::FCC_LG;1577case ISD::SETUEQ: return SPCC::FCC_UE;1578}1579}15801581SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,1582const SparcSubtarget &STI)1583: TargetLowering(TM), Subtarget(&STI) {1584MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));15851586// Instructions which use registers as conditionals examine all the1587// bits (as does the pseudo SELECT_CC expansion). I don't think it1588// matters much whether it's ZeroOrOneBooleanContent, or1589// ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the1590// former.1591setBooleanContents(ZeroOrOneBooleanContent);1592setBooleanVectorContents(ZeroOrOneBooleanContent);15931594// Set up the register classes.1595addRegisterClass(MVT::i32, &SP::IntRegsRegClass);1596if (!Subtarget->useSoftFloat()) {1597addRegisterClass(MVT::f32, &SP::FPRegsRegClass);1598addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);1599addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);1600}1601if (Subtarget->is64Bit()) {1602addRegisterClass(MVT::i64, &SP::I64RegsRegClass);1603} else {1604// On 32bit sparc, we define a double-register 32bit register1605// class, as well. 
This is modeled in LLVM as a 2-vector of i32.1606addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);16071608// ...but almost all operations must be expanded, so set that as1609// the default.1610for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {1611setOperationAction(Op, MVT::v2i32, Expand);1612}1613// Truncating/extending stores/loads are also not supported.1614for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {1615setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);1616setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);1617setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);16181619setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);1620setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);1621setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);16221623setTruncStoreAction(VT, MVT::v2i32, Expand);1624setTruncStoreAction(MVT::v2i32, VT, Expand);1625}1626// However, load and store *are* legal.1627setOperationAction(ISD::LOAD, MVT::v2i32, Legal);1628setOperationAction(ISD::STORE, MVT::v2i32, Legal);1629setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Legal);1630setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Legal);16311632// And we need to promote i64 loads/stores into vector load/store1633setOperationAction(ISD::LOAD, MVT::i64, Custom);1634setOperationAction(ISD::STORE, MVT::i64, Custom);16351636// Sadly, this doesn't work:1637// AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);1638// AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);1639}16401641// Turn FP extload into load/fpextend1642for (MVT VT : MVT::fp_valuetypes()) {1643setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);1644setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);1645setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);1646}16471648// Sparc doesn't have i1 sign extending load1649for (MVT VT : MVT::integer_valuetypes())1650setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);16511652// Turn FP truncstore into trunc + 
store.1653setTruncStoreAction(MVT::f32, MVT::f16, Expand);1654setTruncStoreAction(MVT::f64, MVT::f16, Expand);1655setTruncStoreAction(MVT::f64, MVT::f32, Expand);1656setTruncStoreAction(MVT::f128, MVT::f16, Expand);1657setTruncStoreAction(MVT::f128, MVT::f32, Expand);1658setTruncStoreAction(MVT::f128, MVT::f64, Expand);16591660// Custom legalize GlobalAddress nodes into LO/HI parts.1661setOperationAction(ISD::GlobalAddress, PtrVT, Custom);1662setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);1663setOperationAction(ISD::ConstantPool, PtrVT, Custom);1664setOperationAction(ISD::BlockAddress, PtrVT, Custom);16651666// Sparc doesn't have sext_inreg, replace them with shl/sra1667setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);1668setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);1669setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);16701671// Sparc has no REM or DIVREM operations.1672setOperationAction(ISD::UREM, MVT::i32, Expand);1673setOperationAction(ISD::SREM, MVT::i32, Expand);1674setOperationAction(ISD::SDIVREM, MVT::i32, Expand);1675setOperationAction(ISD::UDIVREM, MVT::i32, Expand);16761677// ... 
nor does SparcV9.1678if (Subtarget->is64Bit()) {1679setOperationAction(ISD::UREM, MVT::i64, Expand);1680setOperationAction(ISD::SREM, MVT::i64, Expand);1681setOperationAction(ISD::SDIVREM, MVT::i64, Expand);1682setOperationAction(ISD::UDIVREM, MVT::i64, Expand);1683}16841685// Custom expand fp<->sint1686setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);1687setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);1688setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);1689setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);16901691// Custom Expand fp<->uint1692setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);1693setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);1694setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);1695setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);16961697// Lower f16 conversion operations into library calls1698setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);1699setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);1700setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);1701setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);1702setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);1703setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);17041705setOperationAction(ISD::BITCAST, MVT::f32, Expand);1706setOperationAction(ISD::BITCAST, MVT::i32, Expand);17071708// Sparc has no select or setcc: expand to SELECT_CC.1709setOperationAction(ISD::SELECT, MVT::i32, Expand);1710setOperationAction(ISD::SELECT, MVT::f32, Expand);1711setOperationAction(ISD::SELECT, MVT::f64, Expand);1712setOperationAction(ISD::SELECT, MVT::f128, Expand);17131714setOperationAction(ISD::SETCC, MVT::i32, Expand);1715setOperationAction(ISD::SETCC, MVT::f32, Expand);1716setOperationAction(ISD::SETCC, MVT::f64, Expand);1717setOperationAction(ISD::SETCC, MVT::f128, Expand);17181719// Sparc doesn't have BRCOND either, it has BR_CC.1720setOperationAction(ISD::BRCOND, MVT::Other, Expand);1721setOperationAction(ISD::BRIND, MVT::Other, 
Expand);1722setOperationAction(ISD::BR_JT, MVT::Other, Expand);1723setOperationAction(ISD::BR_CC, MVT::i32, Custom);1724setOperationAction(ISD::BR_CC, MVT::f32, Custom);1725setOperationAction(ISD::BR_CC, MVT::f64, Custom);1726setOperationAction(ISD::BR_CC, MVT::f128, Custom);17271728setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);1729setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);1730setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);1731setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);17321733setOperationAction(ISD::ADDC, MVT::i32, Custom);1734setOperationAction(ISD::ADDE, MVT::i32, Custom);1735setOperationAction(ISD::SUBC, MVT::i32, Custom);1736setOperationAction(ISD::SUBE, MVT::i32, Custom);17371738if (Subtarget->is64Bit()) {1739setOperationAction(ISD::ADDC, MVT::i64, Custom);1740setOperationAction(ISD::ADDE, MVT::i64, Custom);1741setOperationAction(ISD::SUBC, MVT::i64, Custom);1742setOperationAction(ISD::SUBE, MVT::i64, Custom);1743setOperationAction(ISD::BITCAST, MVT::f64, Expand);1744setOperationAction(ISD::BITCAST, MVT::i64, Expand);1745setOperationAction(ISD::SELECT, MVT::i64, Expand);1746setOperationAction(ISD::SETCC, MVT::i64, Expand);1747setOperationAction(ISD::BR_CC, MVT::i64, Custom);1748setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);17491750setOperationAction(ISD::CTPOP, MVT::i64,1751Subtarget->usePopc() ? Legal : Expand);1752setOperationAction(ISD::CTTZ , MVT::i64, Expand);1753setOperationAction(ISD::CTLZ , MVT::i64, Expand);1754setOperationAction(ISD::BSWAP, MVT::i64, Expand);1755setOperationAction(ISD::ROTL , MVT::i64, Expand);1756setOperationAction(ISD::ROTR , MVT::i64, Expand);1757setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);1758}17591760// ATOMICs.1761// Atomics are supported on SparcV9. 32-bit atomics are also1762// supported by some Leon SparcV8 variants. 
Otherwise, atomics1763// are unsupported.1764if (Subtarget->isV9()) {1765// TODO: we _ought_ to be able to support 64-bit atomics on 32-bit sparcv9,1766// but it hasn't been implemented in the backend yet.1767if (Subtarget->is64Bit())1768setMaxAtomicSizeInBitsSupported(64);1769else1770setMaxAtomicSizeInBitsSupported(32);1771} else if (Subtarget->hasLeonCasa())1772setMaxAtomicSizeInBitsSupported(32);1773else1774setMaxAtomicSizeInBitsSupported(0);17751776setMinCmpXchgSizeInBits(32);17771778setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);17791780setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);17811782// Custom Lower Atomic LOAD/STORE1783setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);1784setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);17851786if (Subtarget->is64Bit()) {1787setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);1788setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);1789setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);1790setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);1791}17921793if (!Subtarget->isV9()) {1794// SparcV8 does not have FNEGD and FABSD.1795setOperationAction(ISD::FNEG, MVT::f64, Custom);1796setOperationAction(ISD::FABS, MVT::f64, Custom);1797}17981799setOperationAction(ISD::FSIN , MVT::f128, Expand);1800setOperationAction(ISD::FCOS , MVT::f128, Expand);1801setOperationAction(ISD::FSINCOS, MVT::f128, Expand);1802setOperationAction(ISD::FREM , MVT::f128, Expand);1803setOperationAction(ISD::FMA , MVT::f128, Expand);1804setOperationAction(ISD::FSIN , MVT::f64, Expand);1805setOperationAction(ISD::FCOS , MVT::f64, Expand);1806setOperationAction(ISD::FSINCOS, MVT::f64, Expand);1807setOperationAction(ISD::FREM , MVT::f64, Expand);1808setOperationAction(ISD::FMA , MVT::f64, Expand);1809setOperationAction(ISD::FSIN , MVT::f32, Expand);1810setOperationAction(ISD::FCOS , MVT::f32, Expand);1811setOperationAction(ISD::FSINCOS, MVT::f32, Expand);1812setOperationAction(ISD::FREM , MVT::f32, 
Expand);1813setOperationAction(ISD::FMA , MVT::f32, Expand);1814setOperationAction(ISD::CTTZ , MVT::i32, Expand);1815setOperationAction(ISD::CTLZ , MVT::i32, Expand);1816setOperationAction(ISD::ROTL , MVT::i32, Expand);1817setOperationAction(ISD::ROTR , MVT::i32, Expand);1818setOperationAction(ISD::BSWAP, MVT::i32, Expand);1819setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);1820setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);1821setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);1822setOperationAction(ISD::FPOW , MVT::f128, Expand);1823setOperationAction(ISD::FPOW , MVT::f64, Expand);1824setOperationAction(ISD::FPOW , MVT::f32, Expand);18251826setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);1827setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);1828setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);18291830// Expands to [SU]MUL_LOHI.1831setOperationAction(ISD::MULHU, MVT::i32, Expand);1832setOperationAction(ISD::MULHS, MVT::i32, Expand);1833setOperationAction(ISD::MUL, MVT::i32, Expand);18341835if (Subtarget->useSoftMulDiv()) {1836// .umul works for both signed and unsigned1837setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);1838setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);1839setLibcallName(RTLIB::MUL_I32, ".umul");18401841setOperationAction(ISD::SDIV, MVT::i32, Expand);1842setLibcallName(RTLIB::SDIV_I32, ".div");18431844setOperationAction(ISD::UDIV, MVT::i32, Expand);1845setLibcallName(RTLIB::UDIV_I32, ".udiv");18461847setLibcallName(RTLIB::SREM_I32, ".rem");1848setLibcallName(RTLIB::UREM_I32, ".urem");1849}18501851if (Subtarget->is64Bit()) {1852setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);1853setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);1854setOperationAction(ISD::MULHU, MVT::i64, Expand);1855setOperationAction(ISD::MULHS, MVT::i64, Expand);18561857setOperationAction(ISD::UMULO, MVT::i64, Custom);1858setOperationAction(ISD::SMULO, MVT::i64, Custom);18591860setOperationAction(ISD::SHL_PARTS, MVT::i64, 
Expand);1861setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);1862setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);1863}18641865// VASTART needs to be custom lowered to use the VarArgsFrameIndex.1866setOperationAction(ISD::VASTART , MVT::Other, Custom);1867// VAARG needs to be lowered to not do unaligned accesses for doubles.1868setOperationAction(ISD::VAARG , MVT::Other, Custom);18691870setOperationAction(ISD::TRAP , MVT::Other, Legal);1871setOperationAction(ISD::DEBUGTRAP , MVT::Other, Legal);18721873// Use the default implementation.1874setOperationAction(ISD::VACOPY , MVT::Other, Expand);1875setOperationAction(ISD::VAEND , MVT::Other, Expand);1876setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);1877setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);1878setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);18791880setStackPointerRegisterToSaveRestore(SP::O6);18811882setOperationAction(ISD::CTPOP, MVT::i32,1883Subtarget->usePopc() ? Legal : Expand);18841885if (Subtarget->isV9() && Subtarget->hasHardQuad()) {1886setOperationAction(ISD::LOAD, MVT::f128, Legal);1887setOperationAction(ISD::STORE, MVT::f128, Legal);1888} else {1889setOperationAction(ISD::LOAD, MVT::f128, Custom);1890setOperationAction(ISD::STORE, MVT::f128, Custom);1891}18921893if (Subtarget->hasHardQuad()) {1894setOperationAction(ISD::FADD, MVT::f128, Legal);1895setOperationAction(ISD::FSUB, MVT::f128, Legal);1896setOperationAction(ISD::FMUL, MVT::f128, Legal);1897setOperationAction(ISD::FDIV, MVT::f128, Legal);1898setOperationAction(ISD::FSQRT, MVT::f128, Legal);1899setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);1900setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);1901if (Subtarget->isV9()) {1902setOperationAction(ISD::FNEG, MVT::f128, Legal);1903setOperationAction(ISD::FABS, MVT::f128, Legal);1904} else {1905setOperationAction(ISD::FNEG, MVT::f128, Custom);1906setOperationAction(ISD::FABS, MVT::f128, Custom);1907}19081909if (!Subtarget->is64Bit()) 
{1910setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");1911setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");1912setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");1913setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");1914}19151916} else {1917// Custom legalize f128 operations.19181919setOperationAction(ISD::FADD, MVT::f128, Custom);1920setOperationAction(ISD::FSUB, MVT::f128, Custom);1921setOperationAction(ISD::FMUL, MVT::f128, Custom);1922setOperationAction(ISD::FDIV, MVT::f128, Custom);1923setOperationAction(ISD::FSQRT, MVT::f128, Custom);1924setOperationAction(ISD::FNEG, MVT::f128, Custom);1925setOperationAction(ISD::FABS, MVT::f128, Custom);19261927setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);1928setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);1929setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);19301931// Setup Runtime library names.1932if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {1933setLibcallName(RTLIB::ADD_F128, "_Qp_add");1934setLibcallName(RTLIB::SUB_F128, "_Qp_sub");1935setLibcallName(RTLIB::MUL_F128, "_Qp_mul");1936setLibcallName(RTLIB::DIV_F128, "_Qp_div");1937setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");1938setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");1939setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");1940setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");1941setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");1942setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");1943setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");1944setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");1945setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");1946setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");1947setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");1948setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");1949setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");1950} else if (!Subtarget->useSoftFloat()) {1951setLibcallName(RTLIB::ADD_F128, "_Q_add");1952setLibcallName(RTLIB::SUB_F128, 
"_Q_sub");
      setLibcallName(RTLIB::MUL_F128,  "_Q_mul");
      setLibcallName(RTLIB::DIV_F128,  "_Q_div");
      setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
      setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
      setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
      setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
      setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
      setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
      setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
      setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
      setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
    }
  }

  if (Subtarget->fixAllFDIVSQRT()) {
    // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
    // the former instructions generate errata on LEON processors.
    setOperationAction(ISD::FDIV, MVT::f32, Promote);
    setOperationAction(ISD::FSQRT, MVT::f32, Promote);
  }

  if (Subtarget->hasNoFMULS()) {
    setOperationAction(ISD::FMUL, MVT::f32, Promote);
  }

  // Custom combine bitcast between f64 and v2i32
  if (!Subtarget->is64Bit())
    setTargetDAGCombine(ISD::BITCAST);

  if (Subtarget->hasLeonCycleCounter())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  setMinFunctionAlignment(Align(4));

  computeRegisterProperties(Subtarget->getRegisterInfo());
}

// Report whether this target is configured for soft-float; simply forwards
// the subtarget setting.
bool SparcTargetLowering::useSoftFloat() const {
  return Subtarget->useSoftFloat();
}

// Map a Sparc-specific SelectionDAG opcode to its printable name.
// Returns nullptr for opcodes that are not SPISD target nodes.
const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((SPISD::NodeType)Opcode) {
  case SPISD::FIRST_NUMBER:    break;
  case SPISD::CMPICC:          return "SPISD::CMPICC";
  case SPISD::CMPFCC:          return "SPISD::CMPFCC";
  case SPISD::CMPFCC_V9:
    return "SPISD::CMPFCC_V9";
  case SPISD::BRICC:           return "SPISD::BRICC";
  case SPISD::BPICC:
    return "SPISD::BPICC";
  case SPISD::BPXCC:
    return "SPISD::BPXCC";
  case SPISD::BRFCC:           return "SPISD::BRFCC";
  case SPISD::BRFCC_V9:
    return "SPISD::BRFCC_V9";
  case SPISD::BR_REG:
    return "SPISD::BR_REG";
  case SPISD::SELECT_ICC:      return "SPISD::SELECT_ICC";
  case SPISD::SELECT_XCC:      return "SPISD::SELECT_XCC";
  case SPISD::SELECT_FCC:      return "SPISD::SELECT_FCC";
  case SPISD::SELECT_REG:
    return "SPISD::SELECT_REG";
  case SPISD::Hi:              return "SPISD::Hi";
  case SPISD::Lo:              return "SPISD::Lo";
  case SPISD::FTOI:            return "SPISD::FTOI";
  case SPISD::ITOF:            return "SPISD::ITOF";
  case SPISD::FTOX:            return "SPISD::FTOX";
  case SPISD::XTOF:            return "SPISD::XTOF";
  case SPISD::CALL:            return "SPISD::CALL";
  case SPISD::RET_GLUE:        return "SPISD::RET_GLUE";
  case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
  case SPISD::FLUSHW:          return "SPISD::FLUSHW";
  case SPISD::TLS_ADD:         return "SPISD::TLS_ADD";
  case SPISD::TLS_LD:          return "SPISD::TLS_LD";
  case SPISD::TLS_CALL:        return "SPISD::TLS_CALL";
  case SPISD::TAIL_CALL:       return "SPISD::TAIL_CALL";
  case SPISD::LOAD_GDOP:       return "SPISD::LOAD_GDOP";
  }
  return nullptr;
}

// Scalar setcc results are i32; vector setcc results keep the operand's
// vector shape with integer elements.
EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
                                            EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
/// be zero. Op is expected to be a target specific node. Used by DAG
/// combiner.
void SparcTargetLowering::computeKnownBitsForTargetNode
                                (const SDValue Op,
                                 KnownBits &Known,
                                 const APInt &DemandedElts,
                                 const SelectionDAG &DAG,
                                 unsigned Depth) const {
  KnownBits Known2;
  Known.resetAll();

  switch (Op.getOpcode()) {
  default: break;
  case SPISD::SELECT_ICC:
  case SPISD::SELECT_XCC:
  case SPISD::SELECT_FCC:
    // A select yields one of its first two operands, so a bit is known only
    // when it is known identically in both.
    Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);

    // Only known if known in both the LHS and RHS.
    Known = Known.intersectWith(Known2);
    break;
  }
}

// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
// set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
                             ISD::CondCode CC, unsigned &SPCC) {
  // Match "(select_cc cc, 1, 0) != 0" — the shape a lowered setcc takes —
  // and recover the operands and condition of the original compare.
  if (isNullConstant(RHS) && CC == ISD::SETNE &&
      (((LHS.getOpcode() == SPISD::SELECT_ICC ||
         LHS.getOpcode() == SPISD::SELECT_XCC) &&
        LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
       (LHS.getOpcode() == SPISD::SELECT_FCC &&
        (LHS.getOperand(3).getOpcode() == SPISD::CMPFCC ||
         LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) &&
      isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) {
    SDValue CMPCC = LHS.getOperand(3);
    SPCC = LHS.getConstantOperandVal(2);
    LHS = CMPCC.getOperand(0);
    RHS = CMPCC.getOperand(1);
  }
}

// Convert to a target node and set target flags.
SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
                                             SelectionDAG &DAG) const {
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return DAG.getTargetGlobalAddress(GA->getGlobal(),
                                      SDLoc(GA),
                                      GA->getValueType(0),
                                      GA->getOffset(), TF);

  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
    return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
                                     CP->getAlign(),
CP->getOffset(), TF);

  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
    return DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                     Op.getValueType(),
                                     0,
                                     TF);

  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
    return DAG.getTargetExternalSymbol(ES->getSymbol(),
                                       ES->getValueType(0), TF);

  llvm_unreachable("Unhandled address SDNode");
}

// Split Op into high and low parts according to HiTF and LoTF.
// Return an ADD node combining the parts.
SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
                                          unsigned HiTF, unsigned LoTF,
                                          SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
}

// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
// or ExternalSymbol SDNode.
SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = getPointerTy(DAG.getDataLayout());

  // Handle PIC mode first. SPARC needs a got load for every variable!
  if (isPositionIndependent()) {
    const Module *M = DAG.getMachineFunction().getFunction().getParent();
    PICLevel::Level picLevel = M->getPICLevel();
    SDValue Idx;

    if (picLevel == PICLevel::SmallPIC) {
      // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
      Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
                        withTargetFlags(Op, SparcMCExpr::VK_Sparc_GOT13, DAG));
    } else {
      // This is the pic32 code model, the GOT is known to be smaller than 4GB.
      Idx = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22,
                         SparcMCExpr::VK_Sparc_GOT10, DAG);
    }

    SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
    SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
    // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
    // function has calls.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    MFI.setHasCalls(true);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
                       MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }

  // This is one of the absolute code models.
  switch(getTargetMachine().getCodeModel()) {
  default:
    llvm_unreachable("Unsupported absolute code model");
  case CodeModel::Small:
    // abs32.
    return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
                        SparcMCExpr::VK_Sparc_LO, DAG);
  case CodeModel::Medium: {
    // abs44.
    SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44,
                               SparcMCExpr::VK_Sparc_M44, DAG);
    H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
    SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG);
    L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
    return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
  }
  case CodeModel::Large: {
    // abs64.
    SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH,
                              SparcMCExpr::VK_Sparc_HM, DAG);
    Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
    SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
                              SparcMCExpr::VK_Sparc_LO, DAG);
    return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
  }
  }
}

// Global addresses, constant-pool entries, and block addresses all share the
// PIC/absolute address expansion in makeAddress().
SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
                                               SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc DL(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  TLSModel::Model model = getTargetMachine().getTLSModel(GV);

  if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
    // General-dynamic and local-dynamic share the call-based sequence below;
    // only the relocation flavors selected here differ.
    unsigned HiTF = ((model == TLSModel::GeneralDynamic)
                     ? SparcMCExpr::VK_Sparc_TLS_GD_HI22
                     : SparcMCExpr::VK_Sparc_TLS_LDM_HI22);
    unsigned LoTF = ((model == TLSModel::GeneralDynamic)
                     ? SparcMCExpr::VK_Sparc_TLS_GD_LO10
                     : SparcMCExpr::VK_Sparc_TLS_LDM_LO10);
    unsigned addTF = ((model == TLSModel::GeneralDynamic)
                      ? SparcMCExpr::VK_Sparc_TLS_GD_ADD
                      : SparcMCExpr::VK_Sparc_TLS_LDM_ADD);
    unsigned callTF = ((model == TLSModel::GeneralDynamic)
                       ?
SparcMCExpr::VK_Sparc_TLS_GD_CALL
                       : SparcMCExpr::VK_Sparc_TLS_LDM_CALL);

    SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
    SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
    SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
                                   withTargetFlags(Op, addTF, DAG));

    // Emit a __tls_get_addr call with the argument pinned in %o0; the glue
    // chain keeps the copy, call, and result read in order.
    SDValue Chain = DAG.getEntryNode();
    SDValue InGlue;

    Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
    Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InGlue);
    InGlue = Chain.getValue(1);
    SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
    SDValue Symbol = withTargetFlags(Op, callTF, DAG);

    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
        DAG.getMachineFunction(), CallingConv::C);
    assert(Mask && "Missing call preserved mask for calling convention");
    SDValue Ops[] = {Chain,
                     Callee,
                     Symbol,
                     DAG.getRegister(SP::O0, PtrVT),
                     DAG.getRegisterMask(Mask),
                     InGlue};
    Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
    InGlue = Chain.getValue(1);
    Chain = DAG.getCALLSEQ_END(Chain, 1, 0, InGlue, DL);
    InGlue = Chain.getValue(1);
    SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InGlue);

    if (model != TLSModel::LocalDynamic)
      return Ret;

    // Local-dynamic: combine the LDO offset of the variable with the value
    // returned by the call.
    SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
                 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG));
    SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
                 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG));
    HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
                   withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG));
  }

  if (model == TLSModel::InitialExec) {
    unsigned ldTF     = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
                                            : SparcMCExpr::VK_Sparc_TLS_IE_LD);

    SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);

    // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
    // function has calls.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    MFI.setHasCalls(true);

    SDValue TGA = makeHiLoPair(Op,
                               SparcMCExpr::VK_Sparc_TLS_IE_HI22,
                               SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
    SDValue Offset = DAG.getNode(SPISD::TLS_LD,
                                 DL, PtrVT, Ptr,
                                 withTargetFlags(Op, ldTF, DAG));
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
                       DAG.getRegister(SP::G7, PtrVT), Offset,
                       withTargetFlags(Op,
                                       SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG));
  }

  // Local-exec: add the LE offset directly to the thread pointer (%g7).
  assert(model == TLSModel::LocalExec);
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
                  withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG));
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
                  withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG));
  SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);

  return DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getRegister(SP::G7, PtrVT), Offset);
}

// Append one argument for an f128 libcall. f128 values are passed
// indirectly: the value is stored to a 16-byte stack slot and the slot's
// address is passed instead.
SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain,
                                                  ArgListTy &Args, SDValue Arg,
                                                  const SDLoc &DL,
                                                  SelectionDAG &DAG) const {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());

  ArgListEntry Entry;
  Entry.Node = Arg;
  Entry.Ty   = ArgTy;

  if (ArgTy->isFP128Ty()) {
    // Create a stack object and pass the pointer to the library function.
    int FI = MFI.CreateStackObject(16, Align(8), false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
                         Align(8));

    Entry.Node = FIPtr;
    Entry.Ty   = PointerType::getUnqual(ArgTy);
  }
  Args.push_back(Entry);
  return Chain;
}

// Lower an f128 operation to a runtime-library call. The first numArgs
// operands of Op become call arguments; an f128 result is returned
// indirectly through a hidden stack slot and reloaded afterwards.
SDValue
SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
                                 const char *LibFuncName,
                                 unsigned numArgs) const {

  ArgListTy Args;

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
  Type *RetTyABI = RetTy;
  SDValue Chain = DAG.getEntryNode();
  SDValue RetPtr;

  if (RetTy->isFP128Ty()) {
    // Create a Stack Object to receive the return value of type f128.
    ArgListEntry Entry;
    int RetFI = MFI.CreateStackObject(16, Align(8), false);
    RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
    Entry.Node = RetPtr;
    Entry.Ty   = PointerType::getUnqual(RetTy);
    if (!Subtarget->is64Bit()) {
      Entry.IsSRet = true;
      Entry.IndirectType = RetTy;
    }
    Entry.IsReturned = false;
    Args.push_back(Entry);
    // The call itself returns void; the real result comes back via RetPtr.
    RetTyABI = Type::getVoidTy(*DAG.getContext());
  }

  assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
  for (unsigned i = 0, e = numArgs; i != e; ++i) {
    Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
  }
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
    .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // chain is in second result.
  if (RetTyABI == RetTy)
    return CallInfo.first;

  assert (RetTy->isFP128Ty() && "Unexpected return type!");

  Chain = CallInfo.second;

  // Load RetPtr to get the return value.
  return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
                     MachinePointerInfo(), Align(8));
}

// Lower an f128 comparison to a libcall and translate its i32 result back
// into a Sparc integer compare; the condition code to test is returned
// through SPCC.
SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
                                              unsigned &SPCC, const SDLoc &DL,
                                              SelectionDAG
&DAG) const {

  const char *LibCall = nullptr;
  bool is64Bit = Subtarget->is64Bit();
  // Simple predicates map to a dedicated libcall; the unordered/exotic ones
  // all route through the generic _Q[p]_cmp and decode its result below.
  switch(SPCC) {
  default: llvm_unreachable("Unhandled conditional code!");
  case SPCC::FCC_E  : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
  case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
  case SPCC::FCC_L  : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
  case SPCC::FCC_G  : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
  case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
  case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
  case SPCC::FCC_UL :
  case SPCC::FCC_ULE:
  case SPCC::FCC_UG :
  case SPCC::FCC_UGE:
  case SPCC::FCC_U  :
  case SPCC::FCC_O  :
  case SPCC::FCC_LG :
  case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
  }

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
  Type *RetTy = Type::getInt32Ty(*DAG.getContext());
  ArgListTy Args;
  SDValue Chain = DAG.getEntryNode();
  Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
  Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL).setChain(Chain)
    .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // result is in first, and chain is in second result.
  SDValue Result = CallInfo.first;

  switch(SPCC) {
  default: {
    // Dedicated predicate libcall: test the i32 result against zero.
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  // The remaining cases decode the _Q[p]_cmp result: each builds a masked
  // and/or adjusted compare of the i32 result and rewrites SPCC to the
  // integer condition to branch on.
  case SPCC::FCC_UL : {
    SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_ULE: {
    SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UG : {
    SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_G;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UGE: {
    SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }

  case SPCC::FCC_U  : {
    SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_O  : {
    SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_LG : {
    SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UE : {
    SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  }
}

// f128 fpextend has no hardware form; forward to the FPEXT libcall chosen
// by the source type.
static SDValue
LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
                   const SparcTargetLowering &TLI) {

  if (Op.getOperand(0).getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);

  if (Op.getOperand(0).getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);

  llvm_unreachable("fpextend with non-float operand!");
  return SDValue();
}

// Rounding from f128 likewise lowers to the FPROUND libcall chosen by the
// destination type; rounds not involving f128 are already legal.
static SDValue
LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
                  const SparcTargetLowering &TLI) {
  // FP_ROUND on f64 and f32 are legal.
  if (Op.getOperand(0).getValueType() != MVT::f128)
    return Op;

  if (Op.getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
  if (Op.getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);

  llvm_unreachable("fpround to non-float!");
  return SDValue();
}

static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  assert(VT == MVT::i32 || VT == MVT::i64);

  // Expand f128 operations to fp128 abi calls.
  if (Op.getOperand(0).getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
    const char *libName = TLI.getLibcallName(VT == MVT::i32
                                             ? RTLIB::FPTOSINT_F128_I32
                                             : RTLIB::FPTOSINT_F128_I64);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the resulting type is illegal.
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  // Otherwise, Convert the fp value to integer in an FP register.
  if (VT == MVT::i32)
    Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
  else
    Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));

  // The integer bits live in an FP register; bitcast moves them to VT.
  return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}

static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || (OpVT == MVT::i64));

  EVT floatVT = (OpVT == MVT::i32) ?
MVT::f32 : MVT::f64;

  // Expand f128 operations to fp128 ABI calls.
  if (Op.getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
    const char *libName = TLI.getLibcallName(OpVT == MVT::i32
                                             ? RTLIB::SINTTOFP_I32_F128
                                             : RTLIB::SINTTOFP_I64_F128);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the operand type is illegal.
  if (!TLI.isTypeLegal(OpVT))
    return SDValue();

  // Otherwise, Convert the int value to FP in an FP register.
  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
  unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
  return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
}

static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the resulting type is legal.
  if (Op.getOperand(0).getValueType() != MVT::f128 ||
      (hasHardQuad && TLI.isTypeLegal(VT)))
    return SDValue();

  assert(VT == MVT::i32 || VT == MVT::i64);

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(VT == MVT::i32
                                            ? RTLIB::FPTOUINT_F128_I32
                                            : RTLIB::FPTOUINT_F128_I64),
                         1);
}

static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || OpVT == MVT::i64);

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the operand type is legal.
  if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
    return SDValue();

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(OpVT == MVT::i32
                                            ? RTLIB::UINTTOFP_I32_F128
                                            : RTLIB::UINTTOFP_I64_F128),
                         1);
}

static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
                          const SparcTargetLowering &TLI, bool hasHardQuad,
                          bool isV9, bool is64Bit) {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a br_cc of a "setcc", and if the setcc got lowered into
  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);
  assert(LHS.getValueType() == RHS.getValueType());

  // Get the condition flag.
  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    // On V9 processors running in 64-bit mode, if CC compares two `i64`s
    // and the RHS is zero we might be able to use a specialized branch.
    if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
        isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC))
      return DAG.getNode(SPISD::BR_REG, dl, MVT::Other, Chain, Dest,
                         DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32),
                         LHS);

    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
    if (isV9)
      // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
      Opc = LHS.getValueType() == MVT::i32 ? SPISD::BPICC : SPISD::BPXCC;
    else
      // Non-v9 targets don't have xcc.
      Opc = SPISD::BRICC;
  } else {
    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
      // Soft f128 compare: the libcall produces integer condition codes,
      // so branch on icc rather than fcc.
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = isV9 ? SPISD::BPICC : SPISD::BRICC;
    } else {
      unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
      CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      Opc = isV9 ? SPISD::BRFCC_V9 : SPISD::BRFCC;
    }
  }
  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}

static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
                              const SparcTargetLowering &TLI, bool hasHardQuad,
                              bool isV9, bool is64Bit) {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a select_cc of a "setcc", and if the setcc got lowered into
  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);
  assert(LHS.getValueType() == RHS.getValueType());

  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    // On V9 processors running in 64-bit mode, if CC compares two `i64`s
    // and the RHS is zero we might be able to use a specialized select.
    // All SELECT_CC between any two scalar integer types are eligible for
    // lowering to specialized instructions.
Additionally, f32 and f64 types2693// are also eligible, but for f128 we can only use the specialized2694// instruction when we have hardquad.2695EVT ValType = TrueVal.getValueType();2696bool IsEligibleType = ValType.isScalarInteger() || ValType == MVT::f32 ||2697ValType == MVT::f64 ||2698(ValType == MVT::f128 && hasHardQuad);2699if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&2700isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC) && IsEligibleType)2701return DAG.getNode(2702SPISD::SELECT_REG, dl, TrueVal.getValueType(), TrueVal, FalseVal,2703DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32), LHS);27042705CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);2706Opc = LHS.getValueType() == MVT::i32 ?2707SPISD::SELECT_ICC : SPISD::SELECT_XCC;2708if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);2709} else {2710if (!hasHardQuad && LHS.getValueType() == MVT::f128) {2711if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);2712CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);2713Opc = SPISD::SELECT_ICC;2714} else {2715unsigned CmpOpc = isV9 ? 
          SPISD::CMPFCC_V9 : SPISD::CMPFCC;
      CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
      Opc = SPISD::SELECT_FCC;
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
    }
  }
  return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}

// Lower ISD::VASTART: store the address of the vararg save area
// (frame pointer + VarArgsFrameOffset) into the va_list memory slot.
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
                            const SparcTargetLowering &TLI) {
  MachineFunction &MF = DAG.getMachineFunction();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());

  // Need frame address to find the address of VarArgsFrameIndex.
  MF.getFrameInfo().setFrameAddressIsTaken(true);

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc DL(Op);
  SDValue Offset =
      DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
                  DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

// Lower ISD::VAARG: load the current va_list pointer, advance it past
// this argument, store it back, then load the argument value itself.
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc DL(Node);
  SDValue VAList =
      DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
  // Increment the pointer, VAList, to the next vaarg.
  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits()/8,
                                                      DL));
  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
                         MachinePointerInfo(SV));
  // Load the actual argument out of the pointer VAList.
  // We can't count on greater alignment than the word size.
  return DAG.getLoad(
      VT, DL, InChain, VAList, MachinePointerInfo(),
      Align(std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8));
}

// Lower ISD::DYNAMIC_STACKALLOC: bump the stack pointer down by Size and
// return a pointer just above the register spill area.
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
                                       const SparcSubtarget *Subtarget) {
  SDValue Chain = Op.getOperand(0);  // Legalize the chain.
  SDValue Size  = Op.getOperand(1);  // Legalize the size.
  MaybeAlign Alignment =
      cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
  Align StackAlign = Subtarget->getFrameLowering()->getStackAlign();
  EVT VT = Size->getValueType(0);
  SDLoc dl(Op);

  // TODO: implement over-aligned alloca. (Note: also implies
  // supporting overaligned function frames + dynamic
  // allocations, at all, which currently isn't supported)
  if (Alignment && *Alignment > StackAlign) {
    const MachineFunction &MF = DAG.getMachineFunction();
    report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
                       "over-aligned dynamic alloca not supported.");
  }

  // The resultant pointer needs to be above the register spill area
  // at the bottom of the stack.
  unsigned regSpillArea;
  if (Subtarget->is64Bit()) {
    regSpillArea = 128;
  } else {
    // On Sparc32, the size of the spill area is 92. Unfortunately,
    // that's only 4-byte aligned, not 8-byte aligned (the stack
    // pointer is 8-byte aligned). So, if the user asked for an 8-byte
    // aligned dynamic allocation, we actually need to add 96 to the
    // bottom of the stack, instead of 92, to ensure 8-byte alignment.

    // That also means adding 4 to the size of the allocation --
    // before applying the 8-byte rounding. Unfortunately, the
    // value we get here has already had rounding applied. So, we need
    // to add 8, instead, wasting a bit more memory.

    // Further, this only actually needs to be done if the required
    // alignment is > 4, but, we've lost that info by this point, too,
    // so we always apply it.

    // (An alternative approach would be to always reserve 96 bytes
    // instead of the required 92, but then we'd waste 4 extra bytes
    // in every frame, not just those with dynamic stack allocations)

    // TODO: modify code in SelectionDAGBuilder to make this less sad.

    Size = DAG.getNode(ISD::ADD, dl, VT, Size,
                       DAG.getConstant(8, dl, VT));
    regSpillArea = 96;
  }

  unsigned SPReg = SP::O6;
  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
  SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP);    // Output chain

  regSpillArea += Subtarget->getStackPointerBias();

  SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
                               DAG.getConstant(regSpillArea, dl, VT));
  SDValue Ops[2] = { NewVal, Chain };
  return DAG.getMergeValues(Ops, dl);
}


// Emit a FLUSHW node (spill all register windows to the stack) chained to
// the entry node.
static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
                              dl, MVT::Other, DAG.getEntryNode());
  return Chain;
}

// Compute the frame address `depth` levels up the call chain by walking
// saved frame pointers; flushes the register windows first when needed.
static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
                            const SparcSubtarget *Subtarget,
                            bool AlwaysFlush = false) {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned FrameReg = SP::I6;
  unsigned stackBias = Subtarget->getStackPointerBias();

  SDValue FrameAddr;
  SDValue Chain;

  // flush first to make sure the windowed registers' values are in stack
  Chain = (depth || AlwaysFlush) ?
          getFLUSHW(Op, DAG) : DAG.getEntryNode();

  FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);

  // Offset of the saved frame pointer within a frame (stack-biased on 64-bit).
  unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;

  // Chase the saved-FP chain `depth` times.
  while (depth--) {
    SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, dl));
    FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
  }
  if (Subtarget->is64Bit())
    FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                            DAG.getIntPtrConstant(stackBias, dl));
  return FrameAddr;
}


// Lower ISD::FRAMEADDR via getFRAMEADDR with the requested depth.
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
                              const SparcSubtarget *Subtarget) {

  uint64_t depth = Op.getConstantOperandVal(0);

  return getFRAMEADDR(depth, Op, DAG, Subtarget);

}

// Lower ISD::RETURNADDR: depth 0 reads %i7 directly; deeper frames load the
// saved return address out of the caller's frame.
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               const SparcSubtarget *Subtarget) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  uint64_t depth = Op.getConstantOperandVal(0);

  SDValue RetAddr;
  if (depth == 0) {
    auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
    RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
    return RetAddr;
  }

  // Need frame address to find return address of the caller.
  SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);

  // Offset of the saved return address within a frame.
  unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
  SDValue Ptr = DAG.getNode(ISD::ADD,
                            dl, VT,
                            FrameAddr,
                            DAG.getIntPtrConstant(Offset, dl));
  RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());

  return RetAddr;
}

// Expand fneg/fabs on f64 into the equivalent operation on the f32 subreg
// that holds the sign bit, plus a plain move of the other half.
static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
                          unsigned opcode) {
  assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
  assert(opcode == ISD::FNEG || opcode == ISD::FABS);

  // Lower fneg/fabs on f64 to fneg/fabs on f32.
  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.

  // Note: in little-endian, the floating-point value is stored in the
  // registers in the opposite order, so the subreg with the sign
  // bit is the highest-numbered (odd), rather than the
  // lowest-numbered (even).

  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
                                            SrcReg64);
  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
                                            SrcReg64);

  if (DAG.getDataLayout().isLittleEndian())
    Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
  else
    Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);

  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                dl, MVT::f64), 0);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
                                       DstReg64, Hi32);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
                                       DstReg64, Lo32);
  return DstReg64;
}

// Lower a f128 load into two f64 loads.
static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
{
  SDLoc dl(Op);
  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
  assert(LdNode->getOffset().isUndef() && "Unexpected node type");

  Align Alignment = commonAlignment(LdNode->getOriginalAlign(), 8);

  SDValue Hi64 =
      DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
                  LdNode->getPointerInfo(), Alignment);
  EVT addrVT =
      LdNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              LdNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
                             LdNode->getPointerInfo().getWithOffset(8),
                             Alignment);

  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  // Reassemble the two f64 halves into an f128 via subreg inserts.
  SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                       dl, MVT::f128);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Hi64,
                               SubRegEven);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Lo64,
                               SubRegOdd);
  SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
                           SDValue(Lo64.getNode(), 1) };
  SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
  return DAG.getMergeValues(Ops, dl);
}

// Custom lowering for loads: only f128 needs splitting; everything else is
// returned unchanged.
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
{
  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());

  EVT MemVT = LdNode->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Load(Op, DAG);

  return Op;
}

// Lower a f128 store into two f64 stores.
static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
  assert(StNode->getOffset().isUndef() && "Unexpected node type");

  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  // Split the f128 value into its two f64 subregisters.
  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegEven);
  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegOdd);

  Align Alignment = commonAlignment(StNode->getOriginalAlign(), 8);

  SDValue OutChains[2];
  OutChains[0] =
      DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
                   StNode->getBasePtr(), StNode->getPointerInfo(),
                   Alignment);
  EVT addrVT = StNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              StNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
                              StNode->getPointerInfo().getWithOffset(8),
                              Alignment);
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

// Custom lowering for stores: f128 is split into two f64 stores; i64 is
// bitcast to v2i32 and stored as such.
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
{
  SDLoc dl(Op);
  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());

  EVT MemVT = St->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Store(Op, DAG);

  if (MemVT == MVT::i64) {
    // Custom handling for i64 stores: turn it into a bitcast and a
    // v2i32 store.
    SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
    SDValue Chain = DAG.getStore(
        St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
        St->getOriginalAlign(), St->getMemOperand()->getFlags(),
        St->getAAInfo());
    return Chain;
  }

  return SDValue();
}

// Lower ISD::FNEG/ISD::FABS on f64/f128 to the subregister-wise expansion.
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
  assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
         && "invalid opcode");

  SDLoc dl(Op);

  if (Op.getValueType() == MVT::f64)
    return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
  if (Op.getValueType() != MVT::f128)
    return Op;

  // Lower fabs/fneg on f128 to fabs/fneg on f64
  // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
  // (As with LowerF64Op, on little-endian, we need to negate the odd
  // subreg)

  SDValue SrcReg128 = Op.getOperand(0);
  SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
                                            SrcReg128);
  SDValue Lo64 =
      DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
                                 SrcReg128);

  // Apply the op to the half that carries the sign bit; V9 has native f64
  // fneg/fabs, pre-V9 must expand again via LowerF64Op.
  if (DAG.getDataLayout().isLittleEndian()) {
    if (isV9)
      Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
    else
      Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
  } else {
    if (isV9)
      Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
    else
      Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
  }

  SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, MVT::f128), 0);
  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
                                        DstReg128, Hi64);
  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
                                        DstReg128, Lo64);
  return DstReg128;
}

// Expand 64-bit ADDC/ADDE/SUBC/SUBE into two 32-bit halves chained
// through the carry glue, then recombine into an i64 result.
static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {

  if (Op.getValueType() != MVT::i64)
    return Op;

  SDLoc dl(Op);
  // Split each operand into 32-bit low/high halves.
  SDValue Src1 = Op.getOperand(0);
  SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
  SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
                               DAG.getConstant(32, dl, MVT::i64));
  Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);

  SDValue Src2 = Op.getOperand(1);
  SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
  SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
                               DAG.getConstant(32, dl, MVT::i64));
  Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);


  // The high half always uses the carry-consuming form; ADDE/SUBE also
  // feed the incoming carry into the low half.
  bool hasChain = false;
  unsigned hiOpc = Op.getOpcode();
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Invalid opcode");
  case ISD::ADDC: hiOpc = ISD::ADDE; break;
  case ISD::ADDE: hasChain = true; break;
  case ISD::SUBC: hiOpc = ISD::SUBE; break;
  case ISD::SUBE: hasChain = true; break;
  }
  SDValue Lo;
  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
  if (hasChain) {
    Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
                     Op.getOperand(2));
  } else {
    Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
  }
  SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
  SDValue Carry = Hi.getValue(1);

  // Recombine the two 32-bit halves into the i64 result.
  Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
  Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
  Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
                   DAG.getConstant(32, dl, MVT::i64));

  SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
  SDValue Ops[2] = { Dst, Carry };
  return DAG.getMergeValues(Ops, dl);
}

// Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
// in LegalizeDAG.cpp except the order of arguments to the library function.
static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
                                const SparcTargetLowering &TLI)
{
  unsigned opcode = Op.getOpcode();
  assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");

  bool isSigned = (opcode == ISD::SMULO);
  EVT VT = MVT::i64;
  EVT WideVT = MVT::i128;
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);

  if (LHS.getValueType() != VT)
    return Op;

  SDValue ShiftAmt = DAG.getConstant(63, dl, VT);

  SDValue RHS = Op.getOperand(1);
  // Build the 128-bit operands: sign-extend the high words for SMULO,
  // zero for UMULO.
  SDValue HiLHS, HiRHS;
  if (isSigned) {
    HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
    HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
  } else {
    HiLHS = DAG.getConstant(0, dl, VT);
    HiRHS = DAG.getConstant(0, dl, MVT::i64);
  }

  SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };

  TargetLowering::MakeLibCallOptions CallOptions;
  CallOptions.setSExt(isSigned);
  SDValue MulResult = TLI.makeLibCall(DAG,
                                      RTLIB::MUL_I128, WideVT,
                                      Args, CallOptions, dl).first;
  SDValue BottomHalf, TopHalf;
  std::tie(BottomHalf, TopHalf) = DAG.SplitScalar(MulResult, dl, VT, VT);
  // Overflow iff the top half differs from the sign/zero extension of the
  // bottom half.
  if (isSigned) {
    SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
    TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
  } else {
    TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf,
                           DAG.getConstant(0, dl, VT),
                           ISD::SETNE);
  }
  // MulResult is a node with an illegal type. Because such things are not
  // generally permitted during this phase of legalization, ensure that
  // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have
  // been folded.
  assert(MulResult->use_empty() && "Illegally typed node still in use!");

  SDValue Ops[2] = { BottomHalf, TopHalf } ;
  return DAG.getMergeValues(Ops, dl);
}

// Atomic loads/stores: monotonic (or weaker) orderings are legal as-is;
// stronger orderings are expanded (with fences) by returning SDValue().
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
    // Expand with a fence.
    return SDValue();
  }

  // Monotonic load/stores are legal.
  return Op;
}

// Custom lowering for target intrinsics without chains; currently only
// llvm.thread.pointer (%g7) is handled.
SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned IntNo = Op.getConstantOperandVal(0);
  SDLoc dl(Op);
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getRegister(SP::G7, PtrVT);
  }
  }
}

// Central dispatch for every operation this target marked Custom.
SDValue SparcTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {

  bool hasHardQuad = Subtarget->hasHardQuad();
  bool isV9        = Subtarget->isV9();
  bool is64Bit     = Subtarget->is64Bit();

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");

  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG, *this,
                                                       Subtarget);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG,
                                                      Subtarget);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::FP_TO_UINT:         return LowerFP_TO_UINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
  case ISD::VASTART:            return LowerVASTART(Op, DAG, *this);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
                                                               Subtarget);

  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::FADD:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::ADD_F128), 2);
  case ISD::FSUB:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::SUB_F128), 2);
  case ISD::FMUL:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::MUL_F128), 2);
  case ISD::FDIV:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::DIV_F128), 2);
  case ISD::FSQRT:              return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::SQRT_F128),1);
  case ISD::FABS:
  case ISD::FNEG:               return LowerFNEGorFABS(Op, DAG, isV9);
  case ISD::FP_EXTEND:          return LowerF128_FPEXTEND(Op, DAG, *this);
  case ISD::FP_ROUND:           return LowerF128_FPROUND(Op, DAG, *this);
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE:               return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
  case ISD::UMULO:
  case ISD::SMULO:              return LowerUMULO_SMULO(Op, DAG, *this);
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:       return LowerATOMIC_LOAD_STORE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  }
}

// Turn a f64 constant into the v2i32 pair holding its raw bit pattern,
// swapping the halves on little-endian.
SDValue SparcTargetLowering::bitcastConstantFPToInt(ConstantFPSDNode *C,
                                                    const SDLoc &DL,
                                                    SelectionDAG &DAG) const {
  APInt V = C->getValueAPF().bitcastToAPInt();
  SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
  SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
  if (DAG.getDataLayout().isLittleEndian())
    std::swap(Lo, Hi);
  return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
}

// Fold (bitcast (f64 constant) to v2i32) into a constant build_vector.
SDValue SparcTargetLowering::PerformBITCASTCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  SDLoc dl(N);
  SDValue Src = N->getOperand(0);

  if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
      Src.getSimpleValueType() == MVT::f64)
    return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);

  return SDValue();
}

SDValue SparcTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BITCAST:
    return PerformBITCASTCombine(N, DCI);
  }
  return SDValue();
}

// Expand SELECT_CC pseudos into control flow, picking the branch opcode
// that matches the condition-code register class and ISA level.
MachineBasicBlock *
SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default: llvm_unreachable("Unknown SELECT_CC!");
  case SP::SELECT_CC_Int_ICC:
  case SP::SELECT_CC_FP_ICC:
  case SP::SELECT_CC_DFP_ICC:
  case SP::SELECT_CC_QFP_ICC:
    if (Subtarget->isV9())
      return expandSelectCC(MI, BB, SP::BPICC);
    return expandSelectCC(MI, BB, SP::BCOND);
  case SP::SELECT_CC_Int_XCC:
  case SP::SELECT_CC_FP_XCC:
  case SP::SELECT_CC_DFP_XCC:
  case SP::SELECT_CC_QFP_XCC:
    return expandSelectCC(MI, BB, SP::BPXCC);
  case SP::SELECT_CC_Int_FCC:
  case SP::SELECT_CC_FP_FCC:
  case SP::SELECT_CC_DFP_FCC:
  case SP::SELECT_CC_QFP_FCC:
    if (Subtarget->isV9())
      return expandSelectCC(MI, BB, SP::FBCOND_V9);
    return expandSelectCC(MI, BB, SP::FBCOND);
  }
}

MachineBasicBlock *
SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned BROpcode) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();

  // To
// "insert" a SELECT_CC instruction, we actually have to insert the
  // triangle control-flow pattern.  The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and the condition code for the branch.
  //
  // We produce the following control flow:
  //     ThisMBB
  //     |  \
  //     |  IfFalseMBB
  //     | /
  //    SinkMBB
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineBasicBlock *ThisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, IfFalseMBB);
  F->insert(It, SinkMBB);

  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
                  std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);

  // Set the new successors for ThisMBB.
  ThisMBB->addSuccessor(IfFalseMBB);
  ThisMBB->addSuccessor(SinkMBB);

  // Conditional branch to SinkMBB keeps the true value; falling through
  // IfFalseMBB picks the false value.
  BuildMI(ThisMBB, dl, TII.get(BROpcode))
    .addMBB(SinkMBB)
    .addImm(CC);

  // IfFalseMBB just falls through to SinkMBB.
  IfFalseMBB->addSuccessor(SinkMBB);

  // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
  BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
          MI.getOperand(0).getReg())
    .addReg(MI.getOperand(1).getReg())
    .addMBB(ThisMBB)
    .addReg(MI.getOperand(2).getReg())
    .addMBB(IfFalseMBB);

  MI.eraseFromParent();   // The pseudo instruction is gone now.
  return SinkMBB;
}

//===----------------------------------------------------------------------===//
//                         Sparc Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
SparcTargetLowering::ConstraintType
SparcTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:  break;
    case 'r':
    case 'f':
    case 'e':
      return C_RegisterClass;
    case 'I': // SIMM13
      return C_Immediate;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SparcTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;

  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'I': // SIMM13
    // Constants that fit in a signed 13-bit immediate get constant weight.
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (isInt<13>(C->getSExtValue()))
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void SparcTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.size() > 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<13>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
      return;
    }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass *>
SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.empty())
    return std::make_pair(0U, nullptr);

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      if (VT == MVT::v2i32)
        return std::make_pair(0U, &SP::IntPairRegClass);
      else if (Subtarget->is64Bit())
        return std::make_pair(0U, &SP::I64RegsRegClass);
      else
        return std::make_pair(0U, &SP::IntRegsRegClass);
    case 'f':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &SP::LowDFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SP::LowQFPRegsRegClass);
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    case 'e':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64 )
        return std::make_pair(0U, &SP::DFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U,
                              &SP::QFPRegsRegClass);
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    }
  }

  if (Constraint.front() != '{')
    return std::make_pair(0U, nullptr);

  assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
  StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
  if (RegName.empty())
    return std::make_pair(0U, nullptr);

  unsigned long long RegNo;
  // Handle numbered register aliases.
  if (RegName[0] == 'r' &&
      getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
    // r0-r7   -> g0-g7
    // r8-r15  -> o0-o7
    // r16-r23 -> l0-l7
    // r24-r31 -> i0-i7
    if (RegNo > 31)
      return std::make_pair(0U, nullptr);
    const char RegTypes[] = {'g', 'o', 'l', 'i'};
    char RegType = RegTypes[RegNo / 8];
    char RegIndex = '0' + (RegNo % 8);
    char Tmp[] = {'{', RegType, RegIndex, '}', 0};
    return getRegForInlineAsmConstraint(TRI, Tmp, VT);
  }

  // Rewrite the fN constraint according to the value type if needed.
  if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
      getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
    if (VT == MVT::f64 && (RegNo % 2 == 0)) {
      return getRegForInlineAsmConstraint(
          TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
    } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
      return getRegForInlineAsmConstraint(
          TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
    } else {
      return std::make_pair(0U, nullptr);
    }
  }

  auto ResultPair =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
  if (!ResultPair.second)
    return std::make_pair(0U, nullptr);

  // Force the use of I64Regs over IntRegs for 64-bit values.
  if (Subtarget->is64Bit() && VT == MVT::i64) {
    assert(ResultPair.second == &SP::IntRegsRegClass &&
           "Unexpected register class");
    return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
  }

  return ResultPair;
}

bool
SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Sparc target isn't yet aware of offsets.
  return false;
}

// Type-legalize results of nodes whose natural result type is illegal:
// f128<->i64 conversions become libcalls, READCYCLECOUNTER becomes a
// register-pair read, and i64 loads become v2i32 loads plus a bitcast.
void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>& Results,
                                             SelectionDAG &DAG) const {

  SDLoc dl(N);

  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // Custom lower only if it involves f128 or i64.
    if (N->getOperand(0).getValueType() != MVT::f128
        || N->getValueType(0) != MVT::i64)
      return;
    libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
               ? RTLIB::FPTOSINT_F128_I64
               : RTLIB::FPTOUINT_F128_I64);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::READCYCLECOUNTER: {
    assert(Subtarget->hasLeonCycleCounter());
    SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
    SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
    SDValue Ops[] = { Lo, Hi };
    SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
    Results.push_back(Pair);
    Results.push_back(N->getOperand(0));
    return;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // Custom lower only if it involves f128 or i64.
    if (N->getValueType(0) != MVT::f128
        || N->getOperand(0).getValueType() != MVT::i64)
      return;

    libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
               ? RTLIB::SINTTOFP_I64_F128
               : RTLIB::UINTTOFP_I64_F128);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::LOAD: {
    LoadSDNode *Ld = cast<LoadSDNode>(N);
    // Custom handling only for i64: turn i64 load into a v2i32 load,
    // and a bitcast.
    if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
      return;

    SDLoc dl(N);
    SDValue LoadRes = DAG.getExtLoad(
        Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
        Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32,
        Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags(),
        Ld->getAAInfo());

    SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
    Results.push_back(Res);
    Results.push_back(LoadRes.getValue(1));
    return;
  }
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool SparcTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void SparcTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}

void SparcTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                        SDNode *Node) const {
  assert(MI.getOpcode() == SP::SUBCCrr || MI.getOpcode() == SP::SUBCCri);
  // If the result is dead, replace it with %g0.
  if (!Node->hasAnyUseOfValue(0))
    MI.getOperand(0).setReg(SP::G0);
}