GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/llvm/lib/Target/ARC/ARCISelLowering.cpp

//===- ARCISelLowering.cpp - ARC DAG Lowering Impl --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ARCTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "ARCISelLowering.h"
#include "ARC.h"
#include "ARCMachineFunctionInfo.h"
#include "ARCSubtarget.h"
#include "ARCTargetMachine.h"
#include "MCTargetDesc/ARCInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include <algorithm>

#define DEBUG_TYPE "arc-lower"

using namespace llvm;

static SDValue lowerCallResult(SDValue Chain, SDValue InGlue,
                               const SmallVectorImpl<CCValAssign> &RVLocs,
                               SDLoc dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals);
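
// Map an ISD integer condition code onto the ARC condition code used by
// CMP/CMOV/BRcc. Signed and unsigned equality collapse to the same ARC codes
// (EQ/NE), since equality does not depend on signedness.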
static ARCCC::CondCode ISDCCtoARCCC(ISD::CondCode isdCC) {
  switch (isdCC) {
  case ISD::SETUEQ:
    return ARCCC::EQ;
  case ISD::SETUGT:
    return ARCCC::HI;
  case ISD::SETUGE:
    return ARCCC::HS;
  case ISD::SETULT:
    return ARCCC::LO;
  case ISD::SETULE:
    return ARCCC::LS;
  case ISD::SETUNE:
    return ARCCC::NE;
  case ISD::SETEQ:
    return ARCCC::EQ;
  case ISD::SETGT:
    return ARCCC::GT;
  case ISD::SETGE:
    return ARCCC::GE;
  case ISD::SETLT:
    return ARCCC::LT;
  case ISD::SETLE:
    return ARCCC::LE;
  case ISD::SETNE:
    return ARCCC::NE;
  default:
    llvm_unreachable("Unhandled ISDCC code.");
  }
}
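
// Replace results of nodes whose result type is illegal. The only case handled
// here is the i64 form of READCYCLECOUNTER, which is implemented as a 32-bit
// TIMER0 read zero-extended to 64 bits.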
void ARCTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  LLVM_DEBUG(dbgs() << "[ARC-ISEL] ReplaceNodeResults ");
  LLVM_DEBUG(N->dump(&DAG));
  LLVM_DEBUG(dbgs() << "; use_count=" << N->use_size() << "\n");

  switch (N->getOpcode()) {
  case ISD::READCYCLECOUNTER:
    if (N->getValueType(0) == MVT::i64) {
      // We read the TIMER0 and zero-extend it to 64-bits as the intrinsic
      // requires.
      SDValue V =
          DAG.getNode(ISD::READCYCLECOUNTER, SDLoc(N),
                      DAG.getVTList(MVT::i32, MVT::Other), N->getOperand(0));
      SDValue Op = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), MVT::i64, V);
      Results.push_back(Op);
      Results.push_back(V.getValue(1));
    }
    break;
  default:
    break;
  }
}

ARCTargetLowering::ARCTargetLowering(const TargetMachine &TM,
                                     const ARCSubtarget &Subtarget)
    : TargetLowering(TM), Subtarget(Subtarget) {
  // Set up the register classes.
  addRegisterClass(MVT::i32, &ARC::GPR32RegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(ARC::SP);

  setSchedulingPreference(Sched::Source);

  // Use i32 for setcc operations results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);
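
  // Start by marking every operation on i32 as Expand, then selectively mark
  // the operations the ARC backend actually supports as Legal or Custom below.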
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, MVT::i32, Expand);

  // Operations to get us off of the ground.
  // Basic.
  setOperationAction(ISD::ADD, MVT::i32, Legal);
  setOperationAction(ISD::SUB, MVT::i32, Legal);
  setOperationAction(ISD::AND, MVT::i32, Legal);
  setOperationAction(ISD::SMAX, MVT::i32, Legal);
  setOperationAction(ISD::SMIN, MVT::i32, Legal);

  setOperationAction(ISD::ADDC, MVT::i32, Legal);
  setOperationAction(ISD::ADDE, MVT::i32, Legal);
  setOperationAction(ISD::SUBC, MVT::i32, Legal);
  setOperationAction(ISD::SUBE, MVT::i32, Legal);

  // Need barrel shifter.
  setOperationAction(ISD::SHL, MVT::i32, Legal);
  setOperationAction(ISD::SRA, MVT::i32, Legal);
  setOperationAction(ISD::SRL, MVT::i32, Legal);
  setOperationAction(ISD::ROTR, MVT::i32, Legal);

  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::UNDEF, MVT::i32, Legal);

  // Need multiplier
  setOperationAction(ISD::MUL, MVT::i32, Legal);
  setOperationAction(ISD::MULHS, MVT::i32, Legal);
  setOperationAction(ISD::MULHU, MVT::i32, Legal);
  setOperationAction(ISD::LOAD, MVT::i32, Legal);
  setOperationAction(ISD::STORE, MVT::i32, Legal);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);

  // Have pseudo instruction for frame addresses.
  setOperationAction(ISD::FRAMEADDR, MVT::i32, Legal);
  // Custom lower global addresses.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);

  // Expand var-args ops.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Other expansions
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  // Sign extend inreg
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Custom);

  // TODO: Predicate these with `options.hasBitScan() ? Legal : Expand`
  // when the HasBitScan predicate is available.
  setOperationAction(ISD::CTLZ, MVT::i32, Legal);
  setOperationAction(ISD::CTTZ, MVT::i32, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i32, Legal);
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     isTypeLegal(MVT::i64) ? Legal : Custom);
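
  // No native atomic instructions: a maximum supported atomic width of 0 makes
  // the AtomicExpand pass lower all atomic operations to __atomic_* libcalls.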
  setMaxAtomicSizeInBitsSupported(0);
}

const char *ARCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  case ARCISD::BL:
    return "ARCISD::BL";
  case ARCISD::CMOV:
    return "ARCISD::CMOV";
  case ARCISD::CMP:
    return "ARCISD::CMP";
  case ARCISD::BRcc:
    return "ARCISD::BRcc";
  case ARCISD::RET:
    return "ARCISD::RET";
  case ARCISD::GAWRAPPER:
    return "ARCISD::GAWRAPPER";
  }
  return nullptr;
}

//===----------------------------------------------------------------------===//
// Misc Lower Operation implementation
//===----------------------------------------------------------------------===//
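
// SELECT_CC is lowered to an ARC CMP node that produces glue, consumed by a
// CMOV predicated on the translated ARC condition code.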
SDValue ARCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TVal = Op.getOperand(2);
  SDValue FVal = Op.getOperand(3);
  SDLoc dl(Op);
  ARCCC::CondCode ArcCC = ISDCCtoARCCC(CC);
  assert(LHS.getValueType() == MVT::i32 && "Only know how to SELECT_CC i32");
  SDValue Cmp = DAG.getNode(ARCISD::CMP, dl, MVT::Glue, LHS, RHS);
  return DAG.getNode(ARCISD::CMOV, dl, TVal.getValueType(), TVal, FVal,
                     DAG.getConstant(ArcCC, dl, MVT::i32), Cmp);
}
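
// Only the i1 form of SIGN_EXTEND_INREG is registered as Custom; it is
// synthesized with a shift-left/arithmetic-shift-right pair, while the 8- and
// 16-bit forms are already legal and are returned unchanged.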
SDValue ARCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Op0 = Op.getOperand(0);
  SDLoc dl(Op);
  assert(Op.getValueType() == MVT::i32 &&
         "Unhandled target sign_extend_inreg.");
  // These are legal
  unsigned Width = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
  if (Width == 16 || Width == 8)
    return Op;
  if (Width >= 32) {
    return {};
  }
  SDValue LS = DAG.getNode(ISD::SHL, dl, MVT::i32, Op0,
                           DAG.getConstant(32 - Width, dl, MVT::i32));
  SDValue SR = DAG.getNode(ISD::SRA, dl, MVT::i32, LS,
                           DAG.getConstant(32 - Width, dl, MVT::i32));
  return SR;
}
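
// BR_CC is lowered to a single ARC BRcc node that carries the destination,
// both operands, and the translated ARC condition code (a fused
// compare-and-branch).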
SDValue ARCTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);
  ARCCC::CondCode arcCC = ISDCCtoARCCC(CC);
  assert(LHS.getValueType() == MVT::i32 && "Only know how to BR_CC i32");
  return DAG.getNode(ARCISD::BRcc, dl, MVT::Other, Chain, Dest, LHS, RHS,
                     DAG.getConstant(arcCC, dl, MVT::i32));
}
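
// Jump-table addresses are wrapped in GAWRAPPER, the same wrapper node used
// for global addresses, so that instruction selection can materialize them.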
SDValue ARCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  auto *N = cast<JumpTableSDNode>(Op);
  SDValue GA = DAG.getTargetJumpTable(N->getIndex(), MVT::i32);
  return DAG.getNode(ARCISD::GAWRAPPER, SDLoc(N), MVT::i32, GA);
}

#include "ARCGenCallingConv.inc"

//===----------------------------------------------------------------------===//
// Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// ARC call implementation
SDValue ARCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  bool &IsTailCall = CLI.IsTailCall;

  IsTailCall = false; // Do not support tail calls yet.

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeCallOperands(Outs, CC_ARC);

  SmallVector<CCValAssign, 16> RVLocs;
  // Analyze return values to determine the number of bytes of stack required.
  CCState RetCCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());
  RetCCInfo.AllocateStack(CCInfo.getStackSize(), Align(4));
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_ARC);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = RetCCInfo.getStackSize();

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  SDValue StackPtr;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc() && "Must be register or memory argument.");
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, dl, ARC::SP,
                                      getPointerTy(DAG.getDataLayout()));
      // Calculate the stack position.
      SDValue SOffset = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
      SDValue PtrOff = DAG.getNode(
          ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), StackPtr, SOffset);

      SDValue Store =
          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
      MemOpChains.push_back(Store);
      IsTailCall = false;
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The Glue is necessary since all emitted instructions must be
  // stuck together.
  SDValue Glue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, Glue);
    Glue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  bool IsDirect = true;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
  else
    IsDirect = false;
  // Branch + Link = #chain, #target_address, #opt_in_flags...
  //             = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a glue for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (Glue.getNode())
    Ops.push_back(Glue);

  Chain = DAG.getNode(IsDirect ? ARCISD::BL : ARCISD::JL, dl, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, Glue, dl);
  Glue = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  if (IsTailCall)
    return Chain;
  return lowerCallResult(Chain, Glue, RVLocs, dl, DAG, InVals);
}

/// Lower the result values of a call into the appropriate copies out of
/// physical registers / memory locations.
static SDValue lowerCallResult(SDValue Chain, SDValue Glue,
                               const SmallVectorImpl<CCValAssign> &RVLocs,
                               SDLoc dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) {
  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
  // Copy results out of physical registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    const CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc()) {
      SDValue RetValue;
      RetValue =
          DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(), Glue);
      Chain = RetValue.getValue(1);
      Glue = RetValue.getValue(2);
      InVals.push_back(RetValue);
    } else {
      assert(VA.isMemLoc() && "Must be memory location.");
      ResultMemLocs.push_back(
          std::make_pair(VA.getLocMemOffset(), InVals.size()));

      // Reserve space for this result.
      InVals.push_back(SDValue());
    }
  }

  // Copy results out of memory.
  SmallVector<SDValue, 4> MemOpChains;
  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
    int Offset = ResultMemLocs[i].first;
    unsigned Index = ResultMemLocs[i].second;
    SDValue StackPtr = DAG.getRegister(ARC::SP, MVT::i32);
    SDValue SpLoc = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr,
                                DAG.getConstant(Offset, dl, MVT::i32));
    SDValue Load =
        DAG.getLoad(MVT::i32, dl, Chain, SpLoc, MachinePointerInfo());
    InVals[Index] = Load;
    MemOpChains.push_back(Load.getValue(1));
  }

  // Transform all load nodes into one single node because
  // all load nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  return Chain;
}

//===----------------------------------------------------------------------===//
// Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

namespace {

struct ArgDataPair {
  SDValue SDV;
  ISD::ArgFlagsTy Flags;
};

} // end anonymous namespace

/// ARC formal arguments implementation
SDValue ARCTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    return LowerCallArguments(Chain, CallConv, IsVarArg, Ins, dl, DAG, InVals);
  }
}

/// Transform physical registers into virtual registers, and generate load
/// operations for argument places on the stack.
SDValue ARCTargetLowering::LowerCallArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
    SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  auto *AFI = MF.getInfo<ARCFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_ARC);

  unsigned StackSlotSize = 4;

  if (!IsVarArg)
    AFI->setReturnStackOffset(CCInfo.getStackSize());

  // All getCopyFromReg ops must precede any getMemcpys to prevent the
  // scheduler clobbering a register before it has been copied.
  // The stages are:
  // 1. CopyFromReg (and load) arg & vararg registers.
  // 2. Chain CopyFromReg nodes into a TokenFactor.
  // 3. Memcpy 'byVal' args & push final InVals.
  // 4. Chain mem ops nodes into a TokenFactor.
  SmallVector<SDValue, 4> CFRegNode;
  SmallVector<ArgDataPair, 4> ArgData;
  SmallVector<SDValue, 4> MemOps;

  // 1a. CopyFromReg (and load) arg registers.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgIn;

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default: {
        LLVM_DEBUG(errs() << "LowerFormalArguments Unhandled argument type: "
                          << (unsigned)RegVT.getSimpleVT().SimpleTy << "\n");
        llvm_unreachable("Unhandled LowerFormalArguments type.");
      }
      case MVT::i32:
        unsigned VReg = RegInfo.createVirtualRegister(&ARC::GPR32RegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
        CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
      }
    } else {
      // Only arguments passed on the stack should make it here.
      assert(VA.isMemLoc());
      // Load the argument to a virtual register
      unsigned ObjSize = VA.getLocVT().getStoreSize();
      assert((ObjSize <= StackSlotSize) && "Unhandled argument");

      // Create the frame index object for this incoming parameter...
      int FI = MFI.CreateFixedObject(ObjSize, VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                          MachinePointerInfo::getFixedStack(MF, FI));
    }
    const ArgDataPair ADP = {ArgIn, Ins[i].Flags};
    ArgData.push_back(ADP);
  }

  // 1b. CopyFromReg vararg registers.
  if (IsVarArg) {
    // Argument registers
    static const MCPhysReg ArgRegs[] = {ARC::R0, ARC::R1, ARC::R2, ARC::R3,
                                        ARC::R4, ARC::R5, ARC::R6, ARC::R7};
    auto *AFI = MF.getInfo<ARCFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
    if (FirstVAReg < std::size(ArgRegs)) {
      int Offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address
      // There are (std::size(ArgRegs) - FirstVAReg) registers which
      // need to be saved.
      int VarFI = MFI.CreateFixedObject((std::size(ArgRegs) - FirstVAReg) * 4,
                                        CCInfo.getStackSize(), true);
      AFI->setVarArgsFrameIndex(VarFI);
      SDValue FIN = DAG.getFrameIndex(VarFI, MVT::i32);
      for (unsigned i = FirstVAReg; i < std::size(ArgRegs); i++) {
        // Move argument from phys reg -> virt reg
        unsigned VReg = RegInfo.createVirtualRegister(&ARC::GPR32RegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
        SDValue VAObj = DAG.getNode(ISD::ADD, dl, MVT::i32, FIN,
                                    DAG.getConstant(Offset, dl, MVT::i32));
        // Move argument from virt reg -> stack
        SDValue Store =
            DAG.getStore(Val.getValue(1), dl, Val, VAObj, MachinePointerInfo());
        MemOps.push_back(Store);
        Offset += 4;
      }
    } else {
      llvm_unreachable("Too many var args parameters.");
    }
  }

  // 2. Chain CopyFromReg nodes into a TokenFactor.
  if (!CFRegNode.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);

  // 3. Memcpy 'byVal' args & push final InVals.
  // Aggregates passed "byVal" need to be copied by the callee.
  // The callee will use a pointer to this copy, rather than the original
  // pointer.
  for (const auto &ArgDI : ArgData) {
    if (ArgDI.Flags.isByVal() && ArgDI.Flags.getByValSize()) {
      unsigned Size = ArgDI.Flags.getByValSize();
      Align Alignment =
          std::max(Align(StackSlotSize), ArgDI.Flags.getNonZeroByValAlign());
      // Create a new object on the stack and copy the pointee into it.
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(FIN);
      MemOps.push_back(DAG.getMemcpy(
          Chain, dl, FIN, ArgDI.SDV, DAG.getConstant(Size, dl, MVT::i32),
          Alignment, false, false, /*CI=*/nullptr, false, MachinePointerInfo(),
          MachinePointerInfo()));
    } else {
      InVals.push_back(ArgDI.SDV);
    }
  }

  // 4. Chain mem ops nodes into a TokenFactor.
  if (!MemOps.empty()) {
    MemOps.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
// Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//
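
// Return values that require stack space are only supported for non-vararg
// functions; a vararg function must return everything in registers.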
bool ARCTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  if (!CCInfo.CheckReturn(Outs, RetCC_ARC))
    return false;
  if (CCInfo.getStackSize() != 0 && IsVarArg)
    return false;
  return true;
}

SDValue
ARCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  auto *AFI = DAG.getMachineFunction().getInfo<ARCFunctionInfo>();
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // CCValAssign - represent the assignment of
  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  if (!IsVarArg)
    CCInfo.AllocateStack(AFI->getReturnStackOffset(), Align(4));

  CCInfo.AnalyzeReturn(Outs, RetCC_ARC);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  SmallVector<SDValue, 4> MemOpChains;
  // Handle return values that must be copied to memory.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc())
      continue;
    assert(VA.isMemLoc());
    if (IsVarArg) {
      report_fatal_error("Can't return value from vararg function in memory");
    }

    int Offset = VA.getLocMemOffset();
    unsigned ObjSize = VA.getLocVT().getStoreSize();
    // Create the frame index object for the memory location.
    int FI = MFI.CreateFixedObject(ObjSize, Offset, false);

    // Create a SelectionDAG node corresponding to a store
    // to this memory location.
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, OutVals[i], FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
  }

  // Transform all store nodes into one single node because
  // all stores are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Now handle return values copied to registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      continue;
    // Copy the result values into the output registers.
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Glue);

    // Guarantee that all emitted copies are glued together so that the
    // scheduler cannot reorder other instructions between them.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  // Emit the return node with the collected chain, register, and glue operands.
  return DAG.getNode(ARCISD::RET, dl, MVT::Other, RetOps);
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//
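
// No target-specific DAG combines are implemented yet.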
SDValue ARCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  return {};
}

//===----------------------------------------------------------------------===//
// Addressing mode description hooks
//===----------------------------------------------------------------------===//

/// Return true if the addressing mode represented by AM is legal for this
/// target, for a load/store of the specified type.
bool ARCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
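  // ARC loads and stores address memory as a base register plus an immediate
  // offset; any mode that needs a scaled index register is rejected.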
  return AM.Scale == 0;
}

// Don't emit tail calls for the time being.
bool ARCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return false;
}

SDValue ARCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  const ARCRegisterInfo &ARI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  assert(Op.getConstantOperandVal(0) == 0 &&
         "Only support lowering frame addr of current frame.");
  Register FrameReg = ARI.getFrameRegister(MF);
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
}
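
// Global addresses are wrapped in an ARCISD::GAWRAPPER node so that
// instruction selection can materialize the symbol (plus any offset) address.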
SDValue ARCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  SDLoc dl(GN);
  int64_t Offset = GN->getOffset();
  SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, Offset);
  return DAG.getNode(ARCISD::GAWRAPPER, dl, MVT::i32, GA);
}

static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  auto *FuncInfo = MF.getInfo<ARCFunctionInfo>();

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc dl(Op);
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue ARCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::READCYCLECOUNTER:
    // As of LLVM 3.8, the lowering code insists that we customize it even
    // though we've declared the i32 version as legal. This is because it only
    // thinks i64 is the truly supported version. We've already converted the
    // i64 version to a widened i32.
    assert(Op.getSimpleValueType() == MVT::i32);
    return Op;
  default:
    llvm_unreachable("unimplemented operand");
  }
}