GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/llvm/lib/Target/M68k/M68kISelLowering.cpp
//===-- M68kISelLowering.cpp - M68k DAG Lowering Impl -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the interfaces that M68k uses to lower LLVM code into a
/// selection DAG.
///
//===----------------------------------------------------------------------===//

#include "M68kISelLowering.h"
#include "M68kCallingConv.h"
#include "M68kMachineFunction.h"
#include "M68kSubtarget.h"
#include "M68kTargetMachine.h"
#include "M68kTargetObjectFile.h"

#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "M68k-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

M68kTargetLowering::M68kTargetLowering(const M68kTargetMachine &TM,
                                       const M68kSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI), TM(TM) {

  MVT PtrVT = MVT::i32;

  setBooleanContents(ZeroOrOneBooleanContent);

  auto *RegInfo = Subtarget.getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Set up the register classes.
  addRegisterClass(MVT::i8, &M68k::DR8RegClass);
  addRegisterClass(MVT::i16, &M68k::XR16RegClass);
  addRegisterClass(MVT::i32, &M68k::XR32RegClass);

  for (auto VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
  }

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setOperationAction({ISD::MUL, ISD::SDIV, ISD::UDIV}, MVT::i8, Promote);
  setOperationAction({ISD::MUL, ISD::SDIV, ISD::UDIV}, MVT::i16, Legal);
  if (Subtarget.atLeastM68020())
    setOperationAction({ISD::MUL, ISD::SDIV, ISD::UDIV}, MVT::i32, Legal);
  else
    setOperationAction({ISD::MUL, ISD::SDIV, ISD::UDIV}, MVT::i32, LibCall);
  setOperationAction(ISD::MUL, MVT::i64, LibCall);
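  // With LibCall, the 64-bit multiply above is emitted as a call to the
  // runtime routine (normally `__muldi3` from libgcc/compiler-rt).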

  for (auto OP :
       {ISD::SREM, ISD::UREM, ISD::UDIVREM, ISD::SDIVREM,
        ISD::MULHS, ISD::MULHU, ISD::UMUL_LOHI, ISD::SMUL_LOHI}) {
    setOperationAction(OP, MVT::i8, Promote);
    setOperationAction(OP, MVT::i16, Legal);
    setOperationAction(OP, MVT::i32, LibCall);
  }

  for (auto OP : {ISD::UMUL_LOHI, ISD::SMUL_LOHI}) {
    setOperationAction(OP, MVT::i8, Expand);
    setOperationAction(OP, MVT::i16, Expand);
  }

  for (auto OP : {ISD::SMULO, ISD::UMULO}) {
    setOperationAction(OP, MVT::i8, Custom);
    setOperationAction(OP, MVT::i16, Custom);
    setOperationAction(OP, MVT::i32, Custom);
  }

  for (auto OP : {ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS})
    setOperationAction(OP, MVT::i32, Custom);

  // Add/sub overflow ops with MVT::Glue are lowered to CCR dependencies.
  for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }

  // With this setup, SADDO and friends should be legal.
  for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
    setOperationAction(ISD::SADDO, VT, Custom);
    setOperationAction(ISD::UADDO, VT, Custom);
    setOperationAction(ISD::SSUBO, VT, Custom);
    setOperationAction(ISD::USUBO, VT, Custom);
  }

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);

  for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
    setOperationAction(ISD::BR_CC, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::SETCCCARRY, VT, Custom);
  }

  for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
  }

  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  computeRegisterProperties(STI.getRegisterInfo());

  // We lower `atomic-compare-and-swap` to `__sync_val_compare_and_swap`
  // for subtargets earlier than M68020.
  setMaxAtomicSizeInBitsSupported(32);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, {MVT::i8, MVT::i16, MVT::i32},
                     Subtarget.atLeastM68020() ? Legal : LibCall);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // M68k does not have native read-modify-write support, so expand all of
  // them to `__sync_fetch_*` for targets earlier than M68020; otherwise
  // expand to a compare-and-swap loop.
  // See `shouldExpandAtomicRMWInIR` below.
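  // Roughly, on a plain 68000 an IR-level
  //   %old = atomicrmw add ptr %p, i32 1 seq_cst
  // becomes a call to `__sync_fetch_and_add_4`, while on 68020+ it is
  // expanded to a loop around the native compare-and-swap instruction.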
  setOperationAction(
      {
          ISD::ATOMIC_LOAD_ADD,
          ISD::ATOMIC_LOAD_SUB,
          ISD::ATOMIC_LOAD_AND,
          ISD::ATOMIC_LOAD_OR,
          ISD::ATOMIC_LOAD_XOR,
          ISD::ATOMIC_LOAD_NAND,
          ISD::ATOMIC_LOAD_MIN,
          ISD::ATOMIC_LOAD_MAX,
          ISD::ATOMIC_LOAD_UMIN,
          ISD::ATOMIC_LOAD_UMAX,
          ISD::ATOMIC_SWAP,
      },
      {MVT::i8, MVT::i16, MVT::i32}, LibCall);

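  // M68k instructions must start at even addresses, hence the minimum
  // 2-byte (word) function alignment.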
  setMinFunctionAlignment(Align(2));
}

TargetLoweringBase::AtomicExpansionKind
M68kTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
  return Subtarget.atLeastM68020()
             ? TargetLoweringBase::AtomicExpansionKind::CmpXChg
             : TargetLoweringBase::AtomicExpansionKind::None;
}

Register
M68kTargetLowering::getExceptionPointerRegister(const Constant *) const {
  return M68k::D0;
}

Register
M68kTargetLowering::getExceptionSelectorRegister(const Constant *) const {
  return M68k::D1;
}

InlineAsm::ConstraintCode
M68kTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  return StringSwitch<InlineAsm::ConstraintCode>(ConstraintCode)
      .Case("Q", InlineAsm::ConstraintCode::Q)
      // We borrow ConstraintCode::Um for 'U'.
      .Case("U", InlineAsm::ConstraintCode::Um)
      .Default(TargetLowering::getInlineAsmMemConstraint(ConstraintCode));
}

EVT M68kTargetLowering::getSetCCResultType(const DataLayout &DL,
                                           LLVMContext &Context,
                                           EVT VT) const {
  // M68k SETcc produces either 0x00 or 0xFF
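  // (e.g. `scs %d0` writes 0xFF to the low byte of %d0 when the carry flag
  // is set and 0x00 otherwise), so i8 models the result exactly.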
  return MVT::i8;
}

MVT M68kTargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT Ty) const {
  if (Ty.isSimple()) {
    return Ty.getSimpleVT();
  }
  return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
}

#include "M68kGenCallingConv.inc"

enum StructReturnType { NotStructReturn, RegStructReturn, StackStructReturn };

static StructReturnType
callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
  if (Outs.empty())
    return NotStructReturn;

  const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
  if (!Flags.isSRet())
    return NotStructReturn;
  if (Flags.isInReg())
    return RegStructReturn;
  return StackStructReturn;
}

/// Determines whether a function uses struct return semantics.
static StructReturnType
argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
  if (Ins.empty())
    return NotStructReturn;

  const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
  if (!Flags.isSRet())
    return NotStructReturn;
  if (Flags.isInReg())
    return RegStructReturn;
  return StackStructReturn;
}

/// Make a copy of an aggregate at the address specified by "Src" to the
/// address "Dst", with size and alignment information specified by the
/// specific parameter attribute. The copy will be passed as a byval function
/// parameter.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &DL) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), DL, MVT::i32);

  return DAG.getMemcpy(
      Chain, DL, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
      /*isVolatile=*/false, /*AlwaysInline=*/true,
      /*CI=*/nullptr, std::nullopt, MachinePointerInfo(), MachinePointerInfo());
}

/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC) { return false; }

/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  // C calling conventions:
  case CallingConv::C:
    return true;
  default:
    return canGuaranteeTCO(CC);
  }
}

/// Return true if the function is being made into a tailcall target by
/// changing its ABI.
static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
  return GuaranteedTailCallOpt && canGuaranteeTCO(CC);
}

/// Return true if the given stack call argument is already available in the
/// same position (relatively) of the caller's incoming argument stack.
static bool MatchingStackOffset(SDValue Arg, unsigned Offset,
                                ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI,
                                const MachineRegisterInfo *MRI,
                                const M68kInstrInfo *TII,
                                const CCValAssign &VA) {
  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;

  for (;;) {
    // Look through nodes that don't alter the bits of the incoming value.
    unsigned Op = Arg.getOpcode();
    if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
      Arg = Arg.getOperand(0);
      continue;
    }
    if (Op == ISD::TRUNCATE) {
      const SDValue &TruncInput = Arg.getOperand(0);
      if (TruncInput.getOpcode() == ISD::AssertZext &&
          cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
              Arg.getValueType()) {
        Arg = TruncInput.getOperand(0);
        continue;
      }
    }
    break;
  }

  int FI = INT_MAX;
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
    if (!Register::isVirtualRegister(VR))
      return false;
    MachineInstr *Def = MRI->getVRegDef(VR);
    if (!Def)
      return false;
    if (!Flags.isByVal()) {
      if (!TII->isLoadFromStackSlot(*Def, FI))
        return false;
    } else {
      unsigned Opcode = Def->getOpcode();
      if ((Opcode == M68k::LEA32p || Opcode == M68k::LEA32f) &&
          Def->getOperand(1).isFI()) {
        FI = Def->getOperand(1).getIndex();
        Bytes = Flags.getByValSize();
      } else
        return false;
    }
  } else if (auto *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // A ByVal argument is passed in as a pointer but it's now being
      // dereferenced, e.g.:
      // define @foo(%struct.X* %A) {
      //   tail call @bar(%struct.X* byval %A)
      // }
      return false;
    SDValue Ptr = Ld->getBasePtr();
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
    if (!FINode)
      return false;
    FI = FINode->getIndex();
  } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
    FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
    FI = FINode->getIndex();
    Bytes = Flags.getByValSize();
  } else
    return false;

  assert(FI != INT_MAX);
  if (!MFI.isFixedObjectIndex(FI))
    return false;

  if (Offset != MFI.getObjectOffset(FI))
    return false;

  if (VA.getLocVT().getSizeInBits() > Arg.getValueType().getSizeInBits()) {
    // If the argument location is wider than the argument type, check that any
    // extension flags match.
    if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
        Flags.isSExt() != MFI.isObjectSExt(FI)) {
      return false;
    }
  }

  return Bytes == MFI.getObjectSize(FI);
}

SDValue
M68kTargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  M68kMachineFunctionInfo *FuncInfo = MF.getInfo<M68kMachineFunctionInfo>();
  int ReturnAddrIndex = FuncInfo->getRAIndex();

  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    unsigned SlotSize = Subtarget.getSlotSize();
    ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(
        SlotSize, -(int64_t)SlotSize, false);
    FuncInfo->setRAIndex(ReturnAddrIndex);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
}

SDValue M68kTargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
                                                    SDValue &OutRetAddr,
                                                    SDValue Chain,
                                                    bool IsTailCall, int FPDiff,
                                                    const SDLoc &DL) const {
  EVT VT = getPointerTy(DAG.getDataLayout());
  OutRetAddr = getReturnAddressFrameIndex(DAG);

  // Load the "old" return address.
  OutRetAddr = DAG.getLoad(VT, DL, Chain, OutRetAddr, MachinePointerInfo());
  return SDValue(OutRetAddr.getNode(), 1);
}

SDValue M68kTargetLowering::EmitTailCallStoreRetAddr(
    SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue RetFI,
    EVT PtrVT, unsigned SlotSize, int FPDiff, const SDLoc &DL) const {
  if (!FPDiff)
    return Chain;

  // Calculate the new stack slot for the return address.
  int NewFO = MF.getFrameInfo().CreateFixedObject(
      SlotSize, (int64_t)FPDiff - SlotSize, false);

  SDValue NewFI = DAG.getFrameIndex(NewFO, PtrVT);
  // Store the return address to the appropriate stack slot.
  Chain = DAG.getStore(
      Chain, DL, RetFI, NewFI,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewFO));
  return Chain;
}

SDValue
M68kTargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
                                     const SDLoc &DL, SelectionDAG &DAG,
                                     const CCValAssign &VA,
                                     MachineFrameInfo &MFI,
                                     unsigned ArgIdx) const {
  // Create the nodes corresponding to a load from this parameter slot.
  ISD::ArgFlagsTy Flags = Ins[ArgIdx].Flags;
  EVT ValVT;

  // If the value is passed by pointer, we have the address passed instead of
  // the value itself.
  if (VA.getLocInfo() == CCValAssign::Indirect)
    ValVT = VA.getLocVT();
  else
    ValVT = VA.getValVT();

  // Because this is a big-endian architecture, sub-word values sit in the
  // high-address bytes of their slot, so offset loads of partial types.
  int Offset = VA.getLocMemOffset();
  if (VA.getValVT() == MVT::i8) {
    Offset += 3;
  } else if (VA.getValVT() == MVT::i16) {
    Offset += 2;
  }

  // TODO Interrupt handlers
  // Calculate the SP offset of an interrupt parameter, re-arranging the slot
  // normally taken by a return address.

  // FIXME For now, all byval parameter objects are marked mutable. This can
  // be changed with more analysis. In case of tail call optimization, mark
  // all arguments mutable, since they could be overwritten by the lowering of
  // the arguments of a tail call.
  bool AlwaysUseMutable = shouldGuaranteeTCO(
      CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
  bool IsImmutable = !AlwaysUseMutable && !Flags.isByVal();

  if (Flags.isByVal()) {
    unsigned Bytes = Flags.getByValSize();
    if (Bytes == 0)
      Bytes = 1; // Don't create zero-sized stack objects.
    int FI = MFI.CreateFixedObject(Bytes, Offset, IsImmutable);
    // TODO Interrupt handlers
    // Adjust the SP offset of an interrupt parameter.
    return DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
  } else {
    int FI =
        MFI.CreateFixedObject(ValVT.getSizeInBits() / 8, Offset, IsImmutable);

    // Set the SExt or ZExt flag.
    if (VA.getLocInfo() == CCValAssign::ZExt) {
      MFI.setObjectZExt(FI, true);
    } else if (VA.getLocInfo() == CCValAssign::SExt) {
      MFI.setObjectSExt(FI, true);
    }

    // TODO Interrupt handlers
    // Adjust the SP offset of an interrupt parameter.

    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    SDValue Val = DAG.getLoad(
        ValVT, DL, Chain, FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
    return VA.isExtInLoc() ? DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val)
                           : Val;
  }
}

SDValue M68kTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
                                             SDValue Arg, const SDLoc &DL,
                                             SelectionDAG &DAG,
                                             const CCValAssign &VA,
                                             ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, DL);
  PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
                       StackPtr, PtrOff);
  if (Flags.isByVal())
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, DL);

  return DAG.getStore(
      Chain, DL, Arg, PtrOff,
      MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
}

//===----------------------------------------------------------------------===//
//                                   Call
//===----------------------------------------------------------------------===//

SDValue M68kTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                      SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  CallingConv::ID CallConv = CLI.CallConv;
  bool &IsTailCall = CLI.IsTailCall;
  bool IsVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  StructReturnType SR = callIsStructReturn(Outs);
  bool IsSibcall = false;
  M68kMachineFunctionInfo *MFI = MF.getInfo<M68kMachineFunctionInfo>();
  // const M68kRegisterInfo *TRI = Subtarget.getRegisterInfo();

  if (CallConv == CallingConv::M68k_INTR)
    report_fatal_error("M68k interrupts may not be called directly");

  auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
  if (Attr.getValueAsBool())
    IsTailCall = false;

  // FIXME Add tail call support

  bool IsMustTail = CLI.CB && CLI.CB->isMustTailCall();
  if (IsMustTail) {
    // Force this to be a tail call. The verifier rules are enough to ensure
    // that we can lower this successfully without moving the return address
    // around.
    IsTailCall = true;
  } else if (IsTailCall) {
    // Check if it's really possible to do a tail call.
    IsTailCall = IsEligibleForTailCallOptimization(
        Callee, CallConv, IsVarArg, SR != NotStructReturn,
        MF.getFunction().hasStructRetAttr(), CLI.RetTy, Outs, OutVals, Ins,
        DAG);

    // Sibcalls are automatically detected tailcalls which do not require
    // ABI changes.
    if (!MF.getTarget().Options.GuaranteedTailCallOpt && IsTailCall)
      IsSibcall = true;

    if (IsTailCall)
      ++NumTailCalls;
  }

  assert(!(IsVarArg && canGuaranteeTCO(CallConv)) &&
         "Var args not supported with calling convention fastcc");

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  SmallVector<Type *, 4> ArgTypes;
  for (const auto &Arg : CLI.getArgs())
    ArgTypes.emplace_back(Arg.Ty);
  M68kCCState CCInfo(ArgTypes, CallConv, IsVarArg, MF, ArgLocs,
                     *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CC_M68k);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
  if (IsSibcall) {
    // This is a sibcall. The memory operands are already in place in the
    // caller's own caller's stack.
    NumBytes = 0;
  } else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
             canGuaranteeTCO(CallConv)) {
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
  }

  int FPDiff = 0;
  if (IsTailCall && !IsSibcall && !IsMustTail) {
    // Lower arguments at fp - stackoffset + fpdiff.
    unsigned NumBytesCallerPushed = MFI->getBytesToPopOnReturn();

    FPDiff = NumBytesCallerPushed - NumBytes;

    // Set the delta of movement of the return-address stack slot, but only
    // if the delta is greater than the previously recorded one.
    if (FPDiff < MFI->getTCReturnAddrDelta())
      MFI->setTCReturnAddrDelta(FPDiff);
  }

  unsigned NumBytesToPush = NumBytes;
  unsigned NumBytesToPop = NumBytes;

  // If we have an inalloca argument, all stack space has already been
  // allocated for us and will be right at the top of the stack. We don't
  // support multiple arguments passed in memory when using inalloca.
  if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
    NumBytesToPush = 0;
    if (!ArgLocs.back().isMemLoc())
      report_fatal_error("cannot use inalloca attribute on a register "
                         "parameter");
    if (ArgLocs.back().getLocMemOffset() != 0)
      report_fatal_error("any parameter with the inalloca attribute must be "
                         "the only memory argument");
  }

  if (!IsSibcall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
                                 NumBytes - NumBytesToPush, DL);

  SDValue RetFI;
  // Load the return address for tail calls.
  if (IsTailCall && FPDiff)
    Chain = EmitTailCallLoadRetAddr(DAG, RetFI, Chain, IsTailCall, FPDiff, DL);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  const M68kRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    // Skip inalloca arguments, they have already been written.
    if (Flags.isInAlloca())
      continue;

    CCValAssign &VA = ArgLocs[i];
    EVT RegVT = VA.getLocVT();
    SDValue Arg = OutVals[i];
    bool IsByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getBitcast(RegVT, Arg);
      break;
    case CCValAssign::Indirect: {
      // Store the argument.
      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      Chain = DAG.getStore(
          Chain, DL, Arg, SpillSlot,
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
      Arg = SpillSlot;
      break;
    }
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (!IsSibcall && (!IsTailCall || IsByVal)) {
      assert(VA.isMemLoc());
      if (!StackPtr.getNode()) {
        StackPtr = DAG.getCopyFromReg(Chain, DL, RegInfo->getStackRegister(),
                                      getPointerTy(DAG.getDataLayout()));
      }
      MemOpChains.push_back(
          LowerMemOpCallTo(Chain, StackPtr, Arg, DL, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // FIXME Make sure PIC-style GOT works as expected.
  // The only time the GOT is really needed is for Medium-PIC static data;
  // otherwise we are happy with pc-rel or static references.

  if (IsVarArg && IsMustTail) {
    const auto &Forwards = MFI->getForwardedMustTailRegParms();
    for (const auto &F : Forwards) {
      SDValue Val = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
      RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
    }
  }

  // For tail calls, lower the arguments to the 'real' stack slots. Sibcalls
  // don't need this because the eligibility check rejects calls that require
  // shuffling arguments passed in memory.
  if (!IsSibcall && IsTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.
    SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);

    SmallVector<SDValue, 8> MemOpChains2;
    SDValue FIN;
    int FI = 0;
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
      CCValAssign &VA = ArgLocs[i];
      if (VA.isRegLoc())
        continue;
      assert(VA.isMemLoc());
      SDValue Arg = OutVals[i];
      ISD::ArgFlagsTy Flags = Outs[i].Flags;
      // Skip inalloca arguments. They don't require any work.
      if (Flags.isInAlloca())
        continue;
      // Create frame index.
      int32_t Offset = VA.getLocMemOffset() + FPDiff;
      uint32_t OpSize = (VA.getLocVT().getSizeInBits() + 7) / 8;
      FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
      FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));

      if (Flags.isByVal()) {
        // Copy relative to framepointer.
        SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), DL);
        if (!StackPtr.getNode()) {
          StackPtr = DAG.getCopyFromReg(Chain, DL, RegInfo->getStackRegister(),
                                        getPointerTy(DAG.getDataLayout()));
        }
        Source = DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
                             StackPtr, Source);

        MemOpChains2.push_back(
            CreateCopyOfByValArgument(Source, FIN, ArgChain, Flags, DAG, DL));
      } else {
        // Store relative to framepointer.
        MemOpChains2.push_back(DAG.getStore(
            ArgChain, DL, Arg, FIN,
            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
      }
    }

    if (!MemOpChains2.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains2);

    // Store the return address to the appropriate stack slot.
    Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetFI,
                                     getPointerTy(DAG.getDataLayout()),
                                     Subtarget.getSlotSize(), FPDiff, DL);
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[i].first,
                             RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is), turn it into a TargetGlobalAddress node so that legalize doesn't
    // hack it.
    GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);

    // We should use an extra load for direct calls to dllimported functions
    // in non-JIT mode.
    const GlobalValue *GV = G->getGlobal();
    if (!GV->hasDLLImportStorageClass()) {
      unsigned char OpFlags = Subtarget.classifyGlobalFunctionReference(GV);

      Callee = DAG.getTargetGlobalAddress(
          GV, DL, getPointerTy(DAG.getDataLayout()), G->getOffset(), OpFlags);

      if (OpFlags == M68kII::MO_GOTPCREL) {

        // Add a wrapper.
        Callee = DAG.getNode(M68kISD::WrapperPC, DL,
                             getPointerTy(DAG.getDataLayout()), Callee);

        // Add an extra indirection.
        Callee = DAG.getLoad(
            getPointerTy(DAG.getDataLayout()), DL, DAG.getEntryNode(), Callee,
            MachinePointerInfo::getGOT(DAG.getMachineFunction()));
      }
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
    unsigned char OpFlags =
        Subtarget.classifyGlobalFunctionReference(nullptr, *Mod);

    Callee = DAG.getTargetExternalSymbol(
        S->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlags);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;

  if (!IsSibcall && IsTailCall) {
    Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, 0, InGlue, DL);
    InGlue = Chain.getValue(1);
  }

  Ops.push_back(Chain);
  Ops.push_back(Callee);

  if (IsTailCall)
    Ops.push_back(DAG.getConstant(FPDiff, DL, MVT::i32));

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const uint32_t *Mask = RegInfo->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");

  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InGlue.getNode())
    Ops.push_back(InGlue);

  if (IsTailCall) {
    MF.getFrameInfo().setHasTailCall();
    return DAG.getNode(M68kISD::TC_RETURN, DL, NodeTys, Ops);
  }

  Chain = DAG.getNode(M68kISD::CALL, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPop;
  if (M68k::isCalleePop(CallConv, IsVarArg,
                        DAG.getTarget().Options.GuaranteedTailCallOpt)) {
    NumBytesForCalleeToPop = NumBytes; // Callee pops everything.
  } else if (!canGuaranteeTCO(CallConv) && SR == StackStructReturn) {
    // If this is a call to a struct-return function, the callee
    // pops the hidden struct pointer, so we have to push it back.
    NumBytesForCalleeToPop = 4;
  } else {
    NumBytesForCalleeToPop = 0; // Callee pops nothing.
  }

  if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
    // No need to reset the stack after the call if the call doesn't return.
    // To keep the MI verifier happy, we'll pretend the callee does it for us.
    NumBytesForCalleeToPop = NumBytes;
  }

  // Returns a flag for retval copy to use.
  if (!IsSibcall) {
    Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, NumBytesForCalleeToPop,
                               InGlue, DL);
    InGlue = Chain.getValue(1);
  }

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,
                         InVals);
}

SDValue M68kTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, RetCC_M68k);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    EVT CopyVT = VA.getLocVT();

    // ??? Is this correct?
    Chain = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), CopyVT, InGlue)
                .getValue(1);
    SDValue Val = Chain.getValue(0);

    if (VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1)
      Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);

    InGlue = Chain.getValue(2);
    InVals.push_back(Val);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//            Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

SDValue M68kTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CCID, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  M68kMachineFunctionInfo *MMFI = MF.getInfo<M68kMachineFunctionInfo>();
  // const TargetFrameLowering &TFL = *Subtarget.getFrameLowering();

  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  SmallVector<Type *, 4> ArgTypes;
  for (const Argument &Arg : MF.getFunction().args())
    ArgTypes.emplace_back(Arg.getType());
  M68kCCState CCInfo(ArgTypes, CCID, IsVarArg, MF, ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_M68k);

  unsigned LastVal = ~0U;
  SDValue ArgValue;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    assert(VA.getValNo() != LastVal && "Same value in different locations");
    (void)LastVal;

    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = &M68k::XR32RegClass;
      else
        llvm_unreachable("Unknown argument type!");

      Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
      ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt) {
        ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      } else if (VA.getLocInfo() == CCValAssign::ZExt) {
        ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      } else if (VA.getLocInfo() == CCValAssign::BCvt) {
        ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
      }

      if (VA.isExtInLoc()) {
        ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);
      }
    } else {
      assert(VA.isMemLoc());
      ArgValue = LowerMemArgument(Chain, CCID, Ins, DL, DAG, VA, MFI, i);
    }

    // If the value is passed via a pointer, do a load.
    // TODO Make sure this handling of indirect arguments is correct.
    if (VA.getLocInfo() == CCValAssign::Indirect)
      ArgValue =
          DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue, MachinePointerInfo());

    InVals.push_back(ArgValue);
  }

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    // The Swift calling convention does not require that we copy the sret
    // argument into %D0 for the return, so we don't set SRetReturnReg for
    // Swift.
    if (CCID == CallingConv::Swift)
      continue;

    // The ABI requires that, for returning structs by value, we copy the sret
    // argument into %D0 for the return. Save the argument into a virtual
    // register so that we can access it from the return points.
    if (Ins[i].Flags.isSRet()) {
      unsigned Reg = MMFI->getSRetReturnReg();
      if (!Reg) {
        MVT PtrTy = getPointerTy(DAG.getDataLayout());
        Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
        MMFI->setSRetReturnReg(Reg);
      }
      SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
      break;
    }
  }

  unsigned StackSize = CCInfo.getStackSize();
  // Align the stack specially for tail calls.
  if (shouldGuaranteeTCO(CCID, MF.getTarget().Options.GuaranteedTailCallOpt))
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of
  // llvm.va_start. We can skip this if there are no va_start calls.
  if (MFI.hasVAStart()) {
    MMFI->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
  }

  if (IsVarArg && MFI.hasMustTailInVarArgFunc()) {
    // We forward some GPRs and some vector types.
    SmallVector<MVT, 2> RegParmTypes;
    MVT IntVT = MVT::i32;
    RegParmTypes.push_back(IntVT);

    // Compute the set of forwarded registers. The rest are scratch.
    // ??? What is this for?
    SmallVectorImpl<ForwardedRegister> &Forwards =
        MMFI->getForwardedMustTailRegParms();
    CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_M68k);

    // Copy all forwards from physical to virtual registers.
    for (ForwardedRegister &F : Forwards) {
      // FIXME Can we use a less constrained schedule?
      SDValue RegVal = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
      F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
      Chain = DAG.getCopyToReg(Chain, DL, F.VReg, RegVal);
    }
  }

  // Some CCs need callee pop.
  if (M68k::isCalleePop(CCID, IsVarArg,
                        MF.getTarget().Options.GuaranteedTailCallOpt)) {
    MMFI->setBytesToPopOnReturn(StackSize); // Callee pops everything.
  } else {
    MMFI->setBytesToPopOnReturn(0); // Callee pops nothing.
    // If this is an sret function, the return should pop the hidden pointer.
    if (!canGuaranteeTCO(CCID) && argsAreStructReturn(Ins) == StackStructReturn)
      MMFI->setBytesToPopOnReturn(4);
  }

  MMFI->setArgumentStackSize(StackSize);

  return Chain;
}

//===----------------------------------------------------------------------===//
//              Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool M68kTargetLowering::CanLowerReturn(
    CallingConv::ID CCID, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CCID, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_M68k);
}

SDValue
M68kTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CCID,
                                bool IsVarArg,
                                const SmallVectorImpl<ISD::OutputArg> &Outs,
                                const SmallVectorImpl<SDValue> &OutVals,
                                const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  M68kMachineFunctionInfo *MFI = MF.getInfo<M68kMachineFunctionInfo>();

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CCID, IsVarArg, MF, RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_M68k);

  SDValue Glue;
  SmallVector<SDValue, 6> RetOps;
  // Operand #0 = Chain (updated below)
  RetOps.push_back(Chain);
  // Operand #1 = Bytes To Pop
  RetOps.push_back(
      DAG.getTargetConstant(MFI->getBytesToPopOnReturn(), DL, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue ValToCopy = OutVals[i];
    EVT ValVT = ValToCopy.getValueType();

    // Promote values to the appropriate types.
    if (VA.getLocInfo() == CCValAssign::SExt)
      ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), ValToCopy);
    else if (VA.getLocInfo() == CCValAssign::ZExt)
      ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), ValToCopy);
    else if (VA.getLocInfo() == CCValAssign::AExt) {
      if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
        ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), ValToCopy);
      else
        ValToCopy = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), ValToCopy);
    } else if (VA.getLocInfo() == CCValAssign::BCvt)
      ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), ValToCopy, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // The Swift calling convention does not require that we copy the sret
  // argument into %d0 for the return, and SRetReturnReg is not set for Swift.

  // The ABI requires that, for returning structs by value, we copy the sret
  // argument into %D0 for the return. Save the argument into a virtual
  // register so that we can access it from the return points.
  //
  // Checking Function.hasStructRetAttr() here is insufficient because the IR
  // may not have an explicit sret argument. If MFI.CanLowerReturn is
  // false, then an sret argument may be implicitly inserted in the SelDAG. In
  // either case MFI->setSRetReturnReg() will have been called.
  if (unsigned SRetReg = MFI->getSRetReturnReg()) {
    // ??? Can I just move this to the top and escape this explanation?
    // When we have both sret and another return value, we should use the
    // original Chain stored in RetOps[0], instead of the current Chain updated
    // in the above loop. If we only have sret, RetOps[0] equals Chain.

    // For the case of sret and another return value, we have
    //   Chain_0 at the function entry
    //   Chain_1 = getCopyToReg(Chain_0) in the above loop
    // If we use Chain_1 in getCopyFromReg, we will have
    //   Val = getCopyFromReg(Chain_1)
    //   Chain_2 = getCopyToReg(Chain_1, Val) from below

    // getCopyToReg(Chain_0) will be glued together with
    // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
    // in Unit B, and we will have a cyclic dependency between Unit A and
    // Unit B:
    //   Data dependency from Unit B to Unit A due to the usage of Val in
    //   getCopyToReg(Chain_1, Val)
    //   Chain dependency from Unit A to Unit B

    // So here, we use RetOps[0] (i.e. Chain_0) for getCopyFromReg.
    SDValue Val = DAG.getCopyFromReg(RetOps[0], DL, SRetReg,
                                     getPointerTy(MF.getDataLayout()));

    // ??? How will this work if the CC does not use registers for argument
    // passing?
    // ??? What if I return multiple structs?
    unsigned RetValReg = M68k::D0;
    Chain = DAG.getCopyToReg(Chain, DL, RetValReg, Val, Glue);
    Glue = Chain.getValue(1);

    RetOps.push_back(
        DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(M68kISD::RET, DL, MVT::Other, RetOps);
}

//===----------------------------------------------------------------------===//
//            Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//

// Like stdcall, the callee cleans up the arguments, except that ECX is
// reserved for storing the address of the tail-called function. Only 2
// registers are free for argument passing (inreg). Tail call optimization is
// performed provided:
//   * tailcallopt is enabled
//   * caller/callee are fastcc
// On the M68k_64 architecture with GOT-style position-independent code, only
// local (within-module) calls are supported at the moment. To keep the stack
// aligned according to the platform ABI, the function
// GetAlignedArgumentStackSize ensures that the argument delta is always a
// multiple of the stack alignment. (Dynamic linkers need this - darwin's
// dyld, for example.) If a tail-called function (callee) has more arguments
// than the caller, the caller needs to make sure that there is room to move
// the RETADDR to. This is achieved by reserving an area the size of the
// argument delta right after the original RETADDR, but before the saved
// frame pointer or the spilled registers, e.g. caller(arg1, arg2) calls
// callee(arg1, arg2, arg3, arg4) with stack layout:
//   arg1
//   arg2
//   RETADDR
//   [ new RETADDR
//     move area ]
//   (possible EBP)
//   ESI
//   EDI
//   local1 ..

/// Make the stack size aligned, e.g. to 16n + 12 for a 16-byte alignment
/// requirement.
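/// For example, with a 16-byte stack alignment and 4-byte slots, this would
/// map a size of 8 to 12, 20 to 28, and 30 to 44 (illustrative values, not
/// taken from the ABI documents).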
unsigned
M68kTargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
                                                SelectionDAG &DAG) const {
  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
  unsigned StackAlignment = TFI.getStackAlignment();
  uint64_t AlignMask = StackAlignment - 1;
  int64_t Offset = StackSize;
  unsigned SlotSize = Subtarget.getSlotSize();
  if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
    // The remainder is small enough; just add the difference.
    Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
  } else {
    // Mask out the lower bits, then add the stack alignment once plus the
    // (StackAlignment - SlotSize) bytes.
    Offset =
        ((~AlignMask) & Offset) + StackAlignment + (StackAlignment - SlotSize);
  }
  return Offset;
}

/// Check whether the call is eligible for tail call optimization. Targets
/// that want to do tail call optimization should implement this function.
bool M68kTargetLowering::IsEligibleForTailCallOptimization(
    SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
    bool IsCalleeStructRet, bool IsCallerStructRet, Type *RetTy,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
  if (!mayTailCallThisCC(CalleeCC))
    return false;

  // If -tailcallopt is specified, make fastcc functions tail-callable.
  MachineFunction &MF = DAG.getMachineFunction();
  const auto &CallerF = MF.getFunction();

  CallingConv::ID CallerCC = CallerF.getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
    if (canGuaranteeTCO(CalleeCC) && CCMatch)
      return true;
    return false;
  }

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
  // emit a special epilogue.
  const M68kRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  if (RegInfo->hasStackRealignment(MF))
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (IsCalleeStructRet || IsCallerStructRet)
    return false;

  // Do not sibcall optimize vararg calls unless all arguments are passed via
  // registers.
  LLVMContext &C = *DAG.getContext();
  if (IsVarArg && !Outs.empty()) {

    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);

    CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
      if (!ArgLocs[i].isRegLoc())
        return false;
  }

  // Check that the call results are passed in the same way.
  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins, RetCC_M68k,
                                  RetCC_M68k))
    return false;

  // The callee has to preserve all registers the caller needs to preserve.
  const M68kRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (!CCMatch) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  unsigned StackArgsSize = 0;

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);

    CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
    StackArgsSize = CCInfo.getStackSize();

    if (StackArgsSize) {
      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo &MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const M68kInstrInfo *TII = Subtarget.getInstrInfo();
      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        SDValue Arg = OutVals[i];
        ISD::ArgFlagsTy Flags = Outs[i].Flags;
        if (VA.getLocInfo() == CCValAssign::Indirect)
          return false;
        if (!VA.isRegLoc()) {
          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, MFI, MRI,
                                   TII, VA))
            return false;
        }
      }
    }

    bool PositionIndependent = isPositionIndependent();
    // If the tailcall address may be in a register, then make sure it's
    // possible to register allocate for it. The call address can
    // only target %A0 or %A1 since the tail call must be scheduled after
    // callee-saved registers are restored. These happen to be the same
    // registers used to pass 'inreg' arguments so watch out for those.
    if ((!isa<GlobalAddressSDNode>(Callee) &&
         !isa<ExternalSymbolSDNode>(Callee)) ||
        PositionIndependent) {
      unsigned NumInRegs = 0;
      // In PIC we need an extra register to formulate the address computation
      // for the callee.
      unsigned MaxInRegs = PositionIndependent ? 1 : 2;

      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        if (!VA.isRegLoc())
          continue;
        Register Reg = VA.getLocReg();
        switch (Reg) {
        default:
          break;
        case M68k::A0:
        case M68k::A1:
          if (++NumInRegs == MaxInRegs)
            return false;
          break;
        }
      }
    }

    const MachineRegisterInfo &MRI = MF.getRegInfo();
    if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
      return false;
  }

  bool CalleeWillPop = M68k::isCalleePop(
      CalleeCC, IsVarArg, MF.getTarget().Options.GuaranteedTailCallOpt);

  if (unsigned BytesToPop =
          MF.getInfo<M68kMachineFunctionInfo>()->getBytesToPopOnReturn()) {
    // If we have bytes to pop, the callee must pop them.
    bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
    if (!CalleePopMatches)
      return false;
  } else if (CalleeWillPop && StackArgsSize > 0) {
    // If we don't have bytes to pop, make sure the callee doesn't pop any.
    return false;
  }

  return true;
}

//===----------------------------------------------------------------------===//
//                              Custom Lower
//===----------------------------------------------------------------------===//

SDValue M68kTargetLowering::LowerOperation(SDValue Op,
                                           SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Should not custom lower this!");
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    return LowerXALUO(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::SETCCCARRY:
    return LowerSETCCCARRY(Op, DAG);
  case ISD::SELECT:
    return LowerSELECT(Op, DAG);
  case ISD::BRCOND:
    return LowerBRCOND(Op, DAG);
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE:
    return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
  case ISD::ConstantPool:
    return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::BlockAddress:
    return LowerBlockAddress(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::SHL_PARTS:
    return LowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return LowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:
    return LowerShiftRightParts(Op, DAG, false);
  case ISD::ATOMIC_FENCE:
    return LowerATOMICFENCE(Op, DAG);
  case ISD::GlobalTLSAddress:
    return LowerGlobalTLSAddress(Op, DAG);
  }
}

SDValue M68kTargetLowering::LowerExternalSymbolCall(SelectionDAG &DAG,
                                                    SDLoc Loc,
                                                    llvm::StringRef SymbolName,
                                                    ArgListTy &&ArgList) const {
  PointerType *PtrTy = PointerType::get(*DAG.getContext(), 0);
  CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(Loc)
      .setChain(DAG.getEntryNode())
      .setLibCallee(CallingConv::C, PtrTy,
                    DAG.getExternalSymbol(SymbolName.data(),
                                          getPointerMemTy(DAG.getDataLayout())),
                    std::move(ArgList));
  return LowerCallTo(CLI).first;
}

SDValue M68kTargetLowering::getTLSGetAddr(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG,
                                          unsigned TargetFlags) const {
  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
  SDValue TGA = DAG.getTargetGlobalAddress(
      GA->getGlobal(), GA, GA->getValueType(0), GA->getOffset(), TargetFlags);
  SDValue Arg = DAG.getNode(ISD::ADD, SDLoc(GA), MVT::i32, GOT, TGA);

  PointerType *PtrTy = PointerType::get(*DAG.getContext(), 0);

  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Arg;
  Entry.Ty = PtrTy;
  Args.push_back(Entry);
  return LowerExternalSymbolCall(DAG, SDLoc(GA), "__tls_get_addr",
                                 std::move(Args));
}

SDValue M68kTargetLowering::getM68kReadTp(SDLoc Loc, SelectionDAG &DAG) const {
  return LowerExternalSymbolCall(DAG, Loc, "__m68k_read_tp", ArgListTy());
}

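// General-dynamic model: the address of a thread-local variable `x` is
// obtained at run time, roughly as `__tls_get_addr(&GOT[x@TLSGD])` (a sketch
// of the sequence built by getTLSGetAddr above).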
SDValue M68kTargetLowering::LowerTLSGeneralDynamic(GlobalAddressSDNode *GA,
1468
SelectionDAG &DAG) const {
1469
return getTLSGetAddr(GA, DAG, M68kII::MO_TLSGD);
1470
}
1471
1472
SDValue M68kTargetLowering::LowerTLSLocalDynamic(GlobalAddressSDNode *GA,
1473
SelectionDAG &DAG) const {
1474
SDValue Addr = getTLSGetAddr(GA, DAG, M68kII::MO_TLSLDM);
1475
SDValue TGA =
1476
DAG.getTargetGlobalAddress(GA->getGlobal(), GA, GA->getValueType(0),
1477
GA->getOffset(), M68kII::MO_TLSLD);
1478
return DAG.getNode(ISD::ADD, SDLoc(GA), MVT::i32, TGA, Addr);
1479
}
1480
1481
SDValue M68kTargetLowering::LowerTLSInitialExec(GlobalAddressSDNode *GA,
1482
SelectionDAG &DAG) const {
1483
SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
1484
SDValue Tp = getM68kReadTp(SDLoc(GA), DAG);
1485
SDValue TGA =
1486
DAG.getTargetGlobalAddress(GA->getGlobal(), GA, GA->getValueType(0),
1487
GA->getOffset(), M68kII::MO_TLSIE);
1488
SDValue Addr = DAG.getNode(ISD::ADD, SDLoc(GA), MVT::i32, TGA, GOT);
1489
SDValue Offset =
1490
DAG.getLoad(MVT::i32, SDLoc(GA), DAG.getEntryNode(), Addr,
1491
MachinePointerInfo::getGOT(DAG.getMachineFunction()));
1492
1493
return DAG.getNode(ISD::ADD, SDLoc(GA), MVT::i32, Offset, Tp);
1494
}
1495
1496
SDValue M68kTargetLowering::LowerTLSLocalExec(GlobalAddressSDNode *GA,
1497
SelectionDAG &DAG) const {
1498
SDValue Tp = getM68kReadTp(SDLoc(GA), DAG);
1499
SDValue TGA =
1500
DAG.getTargetGlobalAddress(GA->getGlobal(), GA, GA->getValueType(0),
1501
GA->getOffset(), M68kII::MO_TLSLE);
1502
return DAG.getNode(ISD::ADD, SDLoc(GA), MVT::i32, TGA, Tp);
1503
}
1504
1505
SDValue M68kTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1506
SelectionDAG &DAG) const {
1507
assert(Subtarget.isTargetELF());
1508
1509
auto *GA = cast<GlobalAddressSDNode>(Op);
1510
TLSModel::Model AccessModel = DAG.getTarget().getTLSModel(GA->getGlobal());
1511
1512
switch (AccessModel) {
1513
case TLSModel::GeneralDynamic:
1514
return LowerTLSGeneralDynamic(GA, DAG);
1515
case TLSModel::LocalDynamic:
1516
return LowerTLSLocalDynamic(GA, DAG);
1517
case TLSModel::InitialExec:
1518
return LowerTLSInitialExec(GA, DAG);
1519
case TLSModel::LocalExec:
1520
return LowerTLSLocalExec(GA, DAG);
1521
}
1522
1523
llvm_unreachable("Unexpected TLS access model type");
1524
}
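
// Summary of the address sequences built by the helpers above (an
// illustrative sketch that follows the code, not a verbatim quote):
//   GeneralDynamic: addr = __tls_get_addr(GOT + sym@TLSGD)
//   LocalDynamic:   addr = sym@TLSLD + __tls_get_addr(mod@TLSLDM)
//   InitialExec:    addr = load(GOT + sym@TLSIE) + __m68k_read_tp()
//   LocalExec:      addr = sym@TLSLE + __m68k_read_tp()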

bool M68kTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                                SDValue C) const {
  // Shift and add instructions on the M68000 and M68010 support operands of
  // up to 32 bits, but MUL only has a 16-bit variant, so it is almost
  // certainly beneficial to lower 8/16/32-bit multiplications by a constant
  // to their add/shift counterparts. For 64-bit mul it is safer to leave the
  // job to the compiler runtime implementations.
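  //
  // For instance (illustrative sketch, not from the original source): on a
  // plain 68000, (mul i32 %x, 10) can be decomposed as
  //   (%x << 3) + (%x << 1)
  // which needs only shifts and adds, avoiding the libcall a full 32-bit
  // MUL would otherwise require.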
  return VT.bitsLE(MVT::i32) || Subtarget.atLeastM68020();
}

static bool isOverflowArithmetic(unsigned Opcode) {
  switch (Opcode) {
  case ISD::UADDO:
  case ISD::SADDO:
  case ISD::USUBO:
  case ISD::SSUBO:
  case ISD::UMULO:
  case ISD::SMULO:
    return true;
  default:
    return false;
  }
}

static void lowerOverflowArithmetic(SDValue Op, SelectionDAG &DAG,
                                    SDValue &Result, SDValue &CCR,
                                    unsigned &CC) {
  SDNode *N = Op.getNode();
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDLoc DL(Op);

  unsigned TruncOp = 0;
  auto PromoteMULO = [&](unsigned ExtOp) {
    // We don't have 8-bit multiplications, so promote the i8 version of
    // U/SMULO to i16.
    // Ideally this should be done by the legalizer, but sadly there is no
    // promotion rule for U/SMULO at the moment.
    if (VT == MVT::i8) {
      LHS = DAG.getNode(ExtOp, DL, MVT::i16, LHS);
      RHS = DAG.getNode(ExtOp, DL, MVT::i16, RHS);
      VT = MVT::i16;
      TruncOp = ISD::TRUNCATE;
    }
  };

  bool NoOverflow = false;
  unsigned BaseOp = 0;
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Unknown ovf instruction!");
  case ISD::SADDO:
    BaseOp = M68kISD::ADD;
    CC = M68k::COND_VS;
    break;
  case ISD::UADDO:
    BaseOp = M68kISD::ADD;
    CC = M68k::COND_CS;
    break;
  case ISD::SSUBO:
    BaseOp = M68kISD::SUB;
    CC = M68k::COND_VS;
    break;
  case ISD::USUBO:
    BaseOp = M68kISD::SUB;
    CC = M68k::COND_CS;
    break;
  case ISD::UMULO:
    PromoteMULO(ISD::ZERO_EXTEND);
    NoOverflow = VT != MVT::i32;
    BaseOp = NoOverflow ? ISD::MUL : M68kISD::UMUL;
    CC = M68k::COND_VS;
    break;
  case ISD::SMULO:
    PromoteMULO(ISD::SIGN_EXTEND);
    NoOverflow = VT != MVT::i32;
    BaseOp = NoOverflow ? ISD::MUL : M68kISD::SMUL;
    CC = M68k::COND_VS;
    break;
  }

  SDVTList VTs;
  if (NoOverflow)
    VTs = DAG.getVTList(VT);
  else
    // Also sets CCR.
    VTs = DAG.getVTList(VT, MVT::i8);

  SDValue Arith = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
  Result = Arith.getValue(0);
  if (TruncOp)
    // Right now the only place to truncate is from i16 to i8.
    Result = DAG.getNode(TruncOp, DL, MVT::i8, Arith);

  if (NoOverflow)
    CCR = DAG.getConstant(0, DL, N->getValueType(1));
  else
    CCR = Arith.getValue(1);
}
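
// A minimal sketch of the rewrite performed above (assumed shapes, for
// illustration): an i32 (uaddo %a, %b) becomes
//   %sum:i32, %ccr:i8 = M68kISD::ADD %a, %b     with CC = COND_CS
// and LowerXALUO below wraps the CCR value in an M68kISD::SETCC node to
// produce the overflow bit expected by the original node's users.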

SDValue M68kTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
  SDNode *N = Op.getNode();
  SDLoc DL(Op);

  // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
  // a "setcc" instruction that checks the overflow flag.
  SDValue Result, CCR;
  unsigned CC;
  lowerOverflowArithmetic(Op, DAG, Result, CCR, CC);

  SDValue Overflow;
  if (isa<ConstantSDNode>(CCR)) {
    // It's likely a result of operations that will not overflow,
    // hence no setcc is needed.
    Overflow = CCR;
  } else {
    // Generate a M68kISD::SETCC.
    Overflow = DAG.getNode(M68kISD::SETCC, DL, N->getValueType(1),
                           DAG.getConstant(CC, DL, MVT::i8), CCR);
  }

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Overflow);
}

/// Create a BTST (Bit Test) node - Test bit \p BitNo in \p Src and set
/// condition according to equal/not-equal condition code \p CC.
static SDValue getBitTestCondition(SDValue Src, SDValue BitNo, ISD::CondCode CC,
                                   const SDLoc &DL, SelectionDAG &DAG) {
  // If Src is i8, promote it to i32 with any_extend. There is no i8 BTST
  // instruction. Since the shift amount is in-range-or-undefined, we know
  // that doing a bittest on the i32 value is ok.
  if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
    Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);

  // If the operand types disagree, extend the shift amount to match. Since
  // BTST ignores high bits (like shifts) we can use anyextend.
  if (Src.getValueType() != BitNo.getValueType())
    BitNo = DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(), BitNo);

  SDValue BTST = DAG.getNode(M68kISD::BTST, DL, MVT::i32, Src, BitNo);

  // NOTE: BTST sets the CCR.Z flag.
  M68k::CondCode Cond = CC == ISD::SETEQ ? M68k::COND_NE : M68k::COND_EQ;
  return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
                     DAG.getConstant(Cond, DL, MVT::i8), BTST);
}
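
// Shape of the resulting DAG (illustrative): a pattern such as
//   (setcc (and %x, (shl 1, %n)), 0, seteq)
// becomes
//   (M68kISD::SETCC cond, (M68kISD::BTST %x, %n))
// so the bit test feeds the condition through CCR instead of materializing
// the AND result in a register.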
1673
1674
/// Result of 'and' is compared against zero. Change to a BTST node if possible.
1675
static SDValue LowerAndToBTST(SDValue And, ISD::CondCode CC, const SDLoc &DL,
1676
SelectionDAG &DAG) {
1677
SDValue Op0 = And.getOperand(0);
1678
SDValue Op1 = And.getOperand(1);
1679
if (Op0.getOpcode() == ISD::TRUNCATE)
1680
Op0 = Op0.getOperand(0);
1681
if (Op1.getOpcode() == ISD::TRUNCATE)
1682
Op1 = Op1.getOperand(0);
1683
1684
SDValue LHS, RHS;
1685
if (Op1.getOpcode() == ISD::SHL)
1686
std::swap(Op0, Op1);
1687
if (Op0.getOpcode() == ISD::SHL) {
1688
if (isOneConstant(Op0.getOperand(0))) {
1689
// If we looked past a truncate, check that it's only truncating away
1690
// known zeros.
1691
unsigned BitWidth = Op0.getValueSizeInBits();
1692
unsigned AndBitWidth = And.getValueSizeInBits();
1693
if (BitWidth > AndBitWidth) {
1694
auto Known = DAG.computeKnownBits(Op0);
1695
if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
1696
return SDValue();
1697
}
1698
LHS = Op1;
1699
RHS = Op0.getOperand(1);
1700
}
1701
} else if (auto *AndRHS = dyn_cast<ConstantSDNode>(Op1)) {
1702
uint64_t AndRHSVal = AndRHS->getZExtValue();
1703
SDValue AndLHS = Op0;
1704
1705
if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
1706
LHS = AndLHS.getOperand(0);
1707
RHS = AndLHS.getOperand(1);
1708
}
1709
1710
// Use BTST if the immediate can't be encoded in a TEST instruction.
1711
if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
1712
LHS = AndLHS;
1713
RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), DL, LHS.getValueType());
1714
}
1715
}
1716
1717
if (LHS.getNode())
1718
return getBitTestCondition(LHS, RHS, CC, DL, DAG);
1719
1720
return SDValue();
1721
}

static M68k::CondCode TranslateIntegerM68kCC(ISD::CondCode SetCCOpcode) {
  switch (SetCCOpcode) {
  default:
    llvm_unreachable("Invalid integer condition!");
  case ISD::SETEQ:
    return M68k::COND_EQ;
  case ISD::SETGT:
    return M68k::COND_GT;
  case ISD::SETGE:
    return M68k::COND_GE;
  case ISD::SETLT:
    return M68k::COND_LT;
  case ISD::SETLE:
    return M68k::COND_LE;
  case ISD::SETNE:
    return M68k::COND_NE;
  case ISD::SETULT:
    return M68k::COND_CS;
  case ISD::SETUGE:
    return M68k::COND_CC;
  case ISD::SETUGT:
    return M68k::COND_HI;
  case ISD::SETULE:
    return M68k::COND_LS;
  }
}

/// Do a one-to-one translation of an ISD::CondCode to the M68k-specific
/// condition code, returning the condition code and the LHS/RHS of the
/// comparison to make.
static unsigned TranslateM68kCC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
                                bool IsFP, SDValue &LHS, SDValue &RHS,
                                SelectionDAG &DAG) {
  if (!IsFP) {
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnes()) {
        // X > -1   -> X == 0, jump !sign.
        RHS = DAG.getConstant(0, DL, RHS.getValueType());
        return M68k::COND_PL;
      }
      if (SetCCOpcode == ISD::SETLT && RHSC->isZero()) {
        // X < 0   -> X == 0, jump on sign.
        return M68k::COND_MI;
      }
      if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
        // X < 1   -> X <= 0
        RHS = DAG.getConstant(0, DL, RHS.getValueType());
        return M68k::COND_LE;
      }
    }

    return TranslateIntegerM68kCC(SetCCOpcode);
  }

  // First determine if it is required or is profitable to flip the operands.

  // If LHS is a foldable load, but RHS is not, flip the condition.
  if (ISD::isNON_EXTLoad(LHS.getNode()) && !ISD::isNON_EXTLoad(RHS.getNode())) {
    SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
    std::swap(LHS, RHS);
  }

  switch (SetCCOpcode) {
  default:
    break;
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETUGT:
  case ISD::SETUGE:
    std::swap(LHS, RHS);
    break;
  }

  // On a floating point condition, the flags are set as follows:
  //  ZF  PF  CF   op
  //   0 | 0 | 0 | X > Y
  //   0 | 0 | 1 | X < Y
  //   1 | 0 | 0 | X == Y
  //   1 | 1 | 1 | unordered
  switch (SetCCOpcode) {
  default:
    llvm_unreachable("Condcode should be pre-legalized away");
  case ISD::SETUEQ:
  case ISD::SETEQ:
    return M68k::COND_EQ;
  case ISD::SETOLT: // flipped
  case ISD::SETOGT:
  case ISD::SETGT:
    return M68k::COND_HI;
  case ISD::SETOLE: // flipped
  case ISD::SETOGE:
  case ISD::SETGE:
    return M68k::COND_CC;
  case ISD::SETUGT: // flipped
  case ISD::SETULT:
  case ISD::SETLT:
    return M68k::COND_CS;
  case ISD::SETUGE: // flipped
  case ISD::SETULE:
  case ISD::SETLE:
    return M68k::COND_LS;
  case ISD::SETONE:
  case ISD::SETNE:
    return M68k::COND_NE;
  case ISD::SETOEQ:
  case ISD::SETUNE:
    return M68k::COND_INVALID;
  }
}

// Convert (truncate (srl X, N) to i1) to (BTST X, N).
static SDValue LowerTruncateToBTST(SDValue Op, ISD::CondCode CC,
                                   const SDLoc &DL, SelectionDAG &DAG) {

  assert(Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1 &&
         "Expected TRUNCATE to i1 node");

  if (Op.getOperand(0).getOpcode() != ISD::SRL)
    return SDValue();

  SDValue ShiftRight = Op.getOperand(0);
  return getBitTestCondition(ShiftRight.getOperand(0), ShiftRight.getOperand(1),
                             CC, DL, DAG);
}

/// \brief Return true if \c Op has a use that doesn't just read flags.
static bool hasNonFlagsUse(SDValue Op) {
  for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
       ++UI) {
    SDNode *User = *UI;
    unsigned UOpNo = UI.getOperandNo();
    if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
      // Look past the truncate.
      UOpNo = User->use_begin().getOperandNo();
      User = *User->use_begin();
    }

    if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
        !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
      return true;
  }
  return false;
}

SDValue M68kTargetLowering::EmitTest(SDValue Op, unsigned M68kCC,
                                     const SDLoc &DL, SelectionDAG &DAG) const {

  // CF and OF aren't always set the way we want. Determine which
  // of these we need.
  bool NeedCF = false;
  bool NeedOF = false;
  switch (M68kCC) {
  default:
    break;
  case M68k::COND_HI:
  case M68k::COND_CC:
  case M68k::COND_CS:
  case M68k::COND_LS:
    NeedCF = true;
    break;
  case M68k::COND_GT:
  case M68k::COND_GE:
  case M68k::COND_LT:
  case M68k::COND_LE:
  case M68k::COND_VS:
  case M68k::COND_VC: {
    // Check if we really need to set the Overflow flag. If NoSignedWrap is
    // present, that is not actually needed.
    switch (Op->getOpcode()) {
    case ISD::ADD:
    case ISD::SUB:
    case ISD::MUL:
    case ISD::SHL: {
      if (Op.getNode()->getFlags().hasNoSignedWrap())
        break;
      [[fallthrough]];
    }
    default:
      NeedOF = true;
      break;
    }
    break;
  }
  }
  // See if we can use the CCR value from the operand instead of
  // doing a separate TEST. TEST always sets OF and CF to 0, so unless
  // we prove that the arithmetic won't overflow, we can't use OF or CF.
  if (Op.getResNo() != 0 || NeedOF || NeedCF) {
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(M68kISD::CMP, DL, MVT::i8,
                       DAG.getConstant(0, DL, Op.getValueType()), Op);
  }
  unsigned Opcode = 0;
  unsigned NumOperands = 0;

  // Truncate operations may prevent the merge of the SETCC instruction
  // and the arithmetic instruction before it. Attempt to truncate the operands
  // of the arithmetic instruction and use a reduced bit-width instruction.
  bool NeedTruncation = false;
  SDValue ArithOp = Op;
  if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
    SDValue Arith = Op->getOperand(0);
    // Both the trunc and the arithmetic op need to have one user each.
    if (Arith->hasOneUse())
      switch (Arith.getOpcode()) {
      default:
        break;
      case ISD::ADD:
      case ISD::SUB:
      case ISD::AND:
      case ISD::OR:
      case ISD::XOR: {
        NeedTruncation = true;
        ArithOp = Arith;
      }
      }
  }

  // NOTICE: In the code below we use ArithOp to hold the arithmetic operation,
  // which may be the result of a cast. We use the variable 'Op', the
  // non-casted value, when we check for possible users.
  switch (ArithOp.getOpcode()) {
  case ISD::ADD:
    Opcode = M68kISD::ADD;
    NumOperands = 2;
    break;
  case ISD::SHL:
  case ISD::SRL:
    // If we have a constant logical shift that's only used in a comparison
    // against zero, turn it into an equivalent AND. This allows turning it
    // into a TEST instruction later.
    if ((M68kCC == M68k::COND_EQ || M68kCC == M68k::COND_NE) &&
        Op->hasOneUse() && isa<ConstantSDNode>(Op->getOperand(1)) &&
        !hasNonFlagsUse(Op)) {
      EVT VT = Op.getValueType();
      unsigned BitWidth = VT.getSizeInBits();
      unsigned ShAmt = Op->getConstantOperandVal(1);
      if (ShAmt >= BitWidth) // Avoid undefined shifts.
        break;
      APInt Mask = ArithOp.getOpcode() == ISD::SRL
                       ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
                       : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
      if (!Mask.isSignedIntN(32)) // Avoid large immediates.
        break;
      Op = DAG.getNode(ISD::AND, DL, VT, Op->getOperand(0),
                       DAG.getConstant(Mask, DL, VT));
    }
    break;

  case ISD::AND:
    // If the primary 'and' result isn't used, don't bother using
    // M68kISD::AND, because a TEST instruction will be better.
    if (!hasNonFlagsUse(Op)) {
      SDValue Op0 = ArithOp->getOperand(0);
      SDValue Op1 = ArithOp->getOperand(1);
      EVT VT = ArithOp.getValueType();
      bool IsAndn = isBitwiseNot(Op0) || isBitwiseNot(Op1);
      bool IsLegalAndnType = VT == MVT::i32 || VT == MVT::i64;

      // But if we can combine this into an ANDN operation, then create an AND
      // now and allow it to be pattern matched into an ANDN.
      if (/*!Subtarget.hasBMI() ||*/ !IsAndn || !IsLegalAndnType)
        break;
    }
    [[fallthrough]];
  case ISD::SUB:
  case ISD::OR:
  case ISD::XOR:
    // Due to the ISEL shortcoming noted above, be conservative if this op is
    // likely to be selected as part of a load-modify-store instruction.
    for (const auto *U : Op.getNode()->uses())
      if (U->getOpcode() == ISD::STORE)
        goto default_case;

    // Otherwise use a regular CCR-setting instruction.
    switch (ArithOp.getOpcode()) {
    default:
      llvm_unreachable("unexpected operator!");
    case ISD::SUB:
      Opcode = M68kISD::SUB;
      break;
    case ISD::XOR:
      Opcode = M68kISD::XOR;
      break;
    case ISD::AND:
      Opcode = M68kISD::AND;
      break;
    case ISD::OR:
      Opcode = M68kISD::OR;
      break;
    }

    NumOperands = 2;
    break;
  case M68kISD::ADD:
  case M68kISD::SUB:
  case M68kISD::OR:
  case M68kISD::XOR:
  case M68kISD::AND:
    return SDValue(Op.getNode(), 1);
  default:
  default_case:
    break;
  }

  // If we found that truncation is beneficial, perform the truncation and
  // update 'Op'.
  if (NeedTruncation) {
    EVT VT = Op.getValueType();
    SDValue WideVal = Op->getOperand(0);
    EVT WideVT = WideVal.getValueType();
    unsigned ConvertedOp = 0;
    // Use a target machine opcode to prevent further DAGCombine
    // optimizations that may separate the arithmetic operations
    // from the setcc node.
    switch (WideVal.getOpcode()) {
    default:
      break;
    case ISD::ADD:
      ConvertedOp = M68kISD::ADD;
      break;
    case ISD::SUB:
      ConvertedOp = M68kISD::SUB;
      break;
    case ISD::AND:
      ConvertedOp = M68kISD::AND;
      break;
    case ISD::OR:
      ConvertedOp = M68kISD::OR;
      break;
    case ISD::XOR:
      ConvertedOp = M68kISD::XOR;
      break;
    }

    if (ConvertedOp) {
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
        SDValue V0 = DAG.getNode(ISD::TRUNCATE, DL, VT, WideVal.getOperand(0));
        SDValue V1 = DAG.getNode(ISD::TRUNCATE, DL, VT, WideVal.getOperand(1));
        Op = DAG.getNode(ConvertedOp, DL, VT, V0, V1);
      }
    }
  }

  if (Opcode == 0) {
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(M68kISD::CMP, DL, MVT::i8,
                       DAG.getConstant(0, DL, Op.getValueType()), Op);
  }
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i8);
  SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);

  SDValue New = DAG.getNode(Opcode, DL, VTs, Ops);
  DAG.ReplaceAllUsesWith(Op, New);
  return SDValue(New.getNode(), 1);
}

/// \brief Return true if the condition is an unsigned comparison operation.
static bool isM68kCCUnsigned(unsigned M68kCC) {
  switch (M68kCC) {
  default:
    llvm_unreachable("Invalid integer condition!");
  case M68k::COND_EQ:
  case M68k::COND_NE:
  case M68k::COND_CS:
  case M68k::COND_HI:
  case M68k::COND_LS:
  case M68k::COND_CC:
    return true;
  case M68k::COND_GT:
  case M68k::COND_GE:
  case M68k::COND_LT:
  case M68k::COND_LE:
    return false;
  }
}

SDValue M68kTargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned M68kCC,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  if (isNullConstant(Op1))
    return EmitTest(Op0, M68kCC, DL, DAG);

  assert(!(isa<ConstantSDNode>(Op1) && Op0.getValueType() == MVT::i1) &&
         "Unexpected comparison operation for MVT::i1 operands");

  if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
       Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
    // Only promote the compare up to i32 if it is a 16-bit operation
    // with an immediate. 16-bit immediates are to be avoided.
    if ((Op0.getValueType() == MVT::i16 &&
         (isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1))) &&
        !DAG.getMachineFunction().getFunction().hasMinSize()) {
      unsigned ExtendOp =
          isM68kCCUnsigned(M68kCC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
      Op0 = DAG.getNode(ExtendOp, DL, MVT::i32, Op0);
      Op1 = DAG.getNode(ExtendOp, DL, MVT::i32, Op1);
    }
    // Use SUB instead of CMP to enable CSE between SUB and CMP.
    SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i8);
    SDValue Sub = DAG.getNode(M68kISD::SUB, DL, VTs, Op0, Op1);
    return SDValue(Sub.getNode(), 1);
  }
  return DAG.getNode(M68kISD::CMP, DL, MVT::i8, Op0, Op1);
}
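
// Why SUB instead of CMP (illustrative): when a function both compares and
// subtracts the same operands, e.g.
//   %c = icmp ult i32 %a, %b
//   %d = sub i32 %a, %b
// emitting the comparison as result 1 of an M68kISD::SUB lets CSE fold the
// two nodes into a single subtraction whose CCR output feeds the SETCC.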

/// Result of 'and' or 'trunc to i1' is compared against zero.
/// Change to a BTST node if possible.
SDValue M68kTargetLowering::LowerToBTST(SDValue Op, ISD::CondCode CC,
                                        const SDLoc &DL,
                                        SelectionDAG &DAG) const {
  if (Op.getOpcode() == ISD::AND)
    return LowerAndToBTST(Op, CC, DL, DAG);
  if (Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1)
    return LowerTruncateToBTST(Op, CC, DL, DAG);
  return SDValue();
}

SDValue M68kTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDLoc DL(Op);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // Optimize to BTST if possible.
  // Lower (X & (1 << N)) == 0 to BTST(X, N).
  // Lower ((X >>u N) & 1) != 0 to BTST(X, N).
  // Lower ((X >>s N) & 1) != 0 to BTST(X, N).
  // Lower (trunc (X >> N) to i1) to BTST(X, N).
  if (Op0.hasOneUse() && isNullConstant(Op1) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    if (SDValue NewSetCC = LowerToBTST(Op0, CC, DL, DAG)) {
      if (VT == MVT::i1)
        return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, NewSetCC);
      return NewSetCC;
    }
  }

  // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
  // these.
  if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {

    // If the input is a setcc, then reuse the input setcc or use a new one with
    // the inverted condition.
    if (Op0.getOpcode() == M68kISD::SETCC) {
      M68k::CondCode CCode = (M68k::CondCode)Op0.getConstantOperandVal(0);
      bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
      if (!Invert)
        return Op0;

      CCode = M68k::GetOppositeBranchCondition(CCode);
      SDValue SetCC =
          DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
                      DAG.getConstant(CCode, DL, MVT::i8), Op0.getOperand(1));
      if (VT == MVT::i1)
        return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
      return SetCC;
    }
  }
  if (Op0.getValueType() == MVT::i1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    if (isOneConstant(Op1)) {
      ISD::CondCode NewCC = ISD::GlobalISel::getSetCCInverse(CC, true);
      return DAG.getSetCC(DL, VT, Op0, DAG.getConstant(0, DL, MVT::i1), NewCC);
    }
    if (!isNullConstant(Op1)) {
      SDValue Xor = DAG.getNode(ISD::XOR, DL, MVT::i1, Op0, Op1);
      return DAG.getSetCC(DL, VT, Xor, DAG.getConstant(0, DL, MVT::i1), CC);
    }
  }

  bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
  unsigned M68kCC = TranslateM68kCC(CC, DL, IsFP, Op0, Op1, DAG);
  if (M68kCC == M68k::COND_INVALID)
    return SDValue();

  SDValue CCR = EmitCmp(Op0, Op1, M68kCC, DL, DAG);
  return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
                     DAG.getConstant(M68kCC, DL, MVT::i8), CCR);
}

SDValue M68kTargetLowering::LowerSETCCCARRY(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Carry = Op.getOperand(2);
  SDValue Cond = Op.getOperand(3);
  SDLoc DL(Op);

  assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
  M68k::CondCode CC = TranslateIntegerM68kCC(cast<CondCodeSDNode>(Cond)->get());

  EVT CarryVT = Carry.getValueType();
  APInt NegOne = APInt::getAllOnes(CarryVT.getScalarSizeInBits());
  Carry = DAG.getNode(M68kISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32), Carry,
                      DAG.getConstant(NegOne, DL, CarryVT));

  SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
  SDValue Cmp =
      DAG.getNode(M68kISD::SUBX, DL, VTs, LHS, RHS, Carry.getValue(1));

  return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
                     DAG.getConstant(CC, DL, MVT::i8), Cmp.getValue(1));
}

/// Return true if opcode is a M68k logical comparison.
static bool isM68kLogicalCmp(SDValue Op) {
  unsigned Opc = Op.getNode()->getOpcode();
  if (Opc == M68kISD::CMP)
    return true;
  if (Op.getResNo() == 1 &&
      (Opc == M68kISD::ADD || Opc == M68kISD::SUB || Opc == M68kISD::ADDX ||
       Opc == M68kISD::SUBX || Opc == M68kISD::SMUL || Opc == M68kISD::UMUL ||
       Opc == M68kISD::OR || Opc == M68kISD::XOR || Opc == M68kISD::AND))
    return true;

  if (Op.getResNo() == 2 && Opc == M68kISD::UMUL)
    return true;

  return false;
}

static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
  if (V.getOpcode() != ISD::TRUNCATE)
    return false;

  SDValue VOp0 = V.getOperand(0);
  unsigned InBits = VOp0.getValueSizeInBits();
  unsigned Bits = V.getValueSizeInBits();
  return DAG.MaskedValueIsZero(VOp0,
                               APInt::getHighBitsSet(InBits, InBits - Bits));
}

SDValue M68kTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  bool addTest = true;
  SDValue Cond = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  SDLoc DL(Op);
  SDValue CC;

  if (Cond.getOpcode() == ISD::SETCC) {
    if (SDValue NewCond = LowerSETCC(Cond, DAG))
      Cond = NewCond;
  }

  // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
  // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
  // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
  // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
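  //
  // Worked example (illustrative): for (select (seteq %x, 0), -1, %y) the
  // code below emits CMP #1,%x, whose borrow (CCR.C) is set exactly when
  // %x == 0. SETCC_CARRY COND_CS then materializes 0 or -1 from the carry,
  // and a final OR with %y completes the select without any branch.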
  if (Cond.getOpcode() == M68kISD::SETCC &&
      Cond.getOperand(1).getOpcode() == M68kISD::CMP &&
      isNullConstant(Cond.getOperand(1).getOperand(0))) {
    SDValue Cmp = Cond.getOperand(1);

    unsigned CondCode = Cond.getConstantOperandVal(0);

    if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
        (CondCode == M68k::COND_EQ || CondCode == M68k::COND_NE)) {
      SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;

      SDValue CmpOp0 = Cmp.getOperand(1);
      // Apply further optimizations for special cases
      // (select (x != 0), -1, 0) -> neg & sbb
      // (select (x == 0), 0, -1) -> neg & sbb
      if (isNullConstant(Y) &&
          (isAllOnesConstant(Op1) == (CondCode == M68k::COND_NE))) {

        SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);

        SDValue Neg =
            DAG.getNode(M68kISD::SUB, DL, VTs,
                        DAG.getConstant(0, DL, CmpOp0.getValueType()), CmpOp0);

        SDValue Res = DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
                                  DAG.getConstant(M68k::COND_CS, DL, MVT::i8),
                                  SDValue(Neg.getNode(), 1));
        return Res;
      }

      Cmp = DAG.getNode(M68kISD::CMP, DL, MVT::i8,
                        DAG.getConstant(1, DL, CmpOp0.getValueType()), CmpOp0);

      SDValue Res = // Res = 0 or -1.
          DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
                      DAG.getConstant(M68k::COND_CS, DL, MVT::i8), Cmp);

      if (isAllOnesConstant(Op1) != (CondCode == M68k::COND_EQ))
        Res = DAG.getNOT(DL, Res, Res.getValueType());

      if (!isNullConstant(Op2))
        Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
      return Res;
    }
  }

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == M68kISD::SETCC_CARRY &&
      isOneConstant(Cond.getOperand(1)))
    Cond = Cond.getOperand(0);

  // If condition flag is set by a M68kISD::CMP, then use it as the condition
  // setting operand in place of the M68kISD::SETCC.
  unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == M68kISD::SETCC || CondOpcode == M68kISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();

    bool IllegalFPCMov = false;

    if ((isM68kLogicalCmp(Cmp) && !IllegalFPCMov) || Opc == M68kISD::BTST) {
      Cond = Cmp;
      addTest = false;
    }
  } else if (isOverflowArithmetic(CondOpcode)) {
    // Result is unused here.
    SDValue Result;
    unsigned CCode;
    lowerOverflowArithmetic(Cond, DAG, Result, Cond, CCode);
    CC = DAG.getConstant(CCode, DL, MVT::i8);
    addTest = false;
  }

  if (addTest) {
    // Look past the truncate if the high bits are known zero.
    if (isTruncWithZeroHighBitsInput(Cond, DAG))
      Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      if (SDValue NewSetCC = LowerToBTST(Cond, ISD::SETNE, DL, DAG)) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
    CC = DAG.getConstant(M68k::COND_NE, DL, MVT::i8);
    Cond = EmitTest(Cond, M68k::COND_NE, DL, DAG);
  }

  // a <  b ? -1 :  0 -> RES = ~setcc_carry
  // a <  b ?  0 : -1 -> RES = setcc_carry
  // a >= b ? -1 :  0 -> RES = setcc_carry
  // a >= b ?  0 : -1 -> RES = ~setcc_carry
  if (Cond.getOpcode() == M68kISD::SUB) {
    unsigned CondCode = CC->getAsZExtVal();

    if ((CondCode == M68k::COND_CC || CondCode == M68k::COND_CS) &&
        (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
        (isNullConstant(Op1) || isNullConstant(Op2))) {
      SDValue Res =
          DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
                      DAG.getConstant(M68k::COND_CS, DL, MVT::i8), Cond);
      if (isAllOnesConstant(Op1) != (CondCode == M68k::COND_CS))
        return DAG.getNOT(DL, Res, Res.getValueType());
      return Res;
    }
  }

  // M68k doesn't have an i8 cmov. If both operands are the result of a
  // truncate widen the cmov and push the truncate through. This avoids
  // introducing a new branch during isel and doesn't add any extensions.
  if (Op.getValueType() == MVT::i8 && Op1.getOpcode() == ISD::TRUNCATE &&
      Op2.getOpcode() == ISD::TRUNCATE) {
    SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
    if (T1.getValueType() == T2.getValueType() &&
        // Block CopyFromReg so partial register stalls are avoided.
        T1.getOpcode() != ISD::CopyFromReg &&
        T2.getOpcode() != ISD::CopyFromReg) {
      SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
      SDValue Cmov = DAG.getNode(M68kISD::CMOV, DL, VTs, T2, T1, CC, Cond);
      return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
    }
  }

  // Simple optimization when Cond is a constant to avoid generating
  // M68kISD::CMOV if possible.
  // TODO: Generalize this to use SelectionDAG::computeKnownBits.
  if (auto *Const = dyn_cast<ConstantSDNode>(Cond.getNode())) {
    const APInt &C = Const->getAPIntValue();
    if (C.countr_zero() >= 5)
      return Op2;
    else if (C.countr_one() >= 5)
      return Op1;
  }

  // M68kISD::CMOV means set the result (which is operand 1) to the RHS if
  // condition is true.
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {Op2, Op1, CC, Cond};
  return DAG.getNode(M68kISD::CMOV, DL, VTs, Ops);
}

/// Return true if node is an ISD::AND or ISD::OR of two M68k::SETcc nodes
/// each of which has no other use apart from the AND / OR.
static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
  Opc = Op.getOpcode();
  if (Opc != ISD::OR && Opc != ISD::AND)
    return false;
  return (M68k::IsSETCC(Op.getOperand(0).getOpcode()) &&
          Op.getOperand(0).hasOneUse() &&
          M68k::IsSETCC(Op.getOperand(1).getOpcode()) &&
          Op.getOperand(1).hasOneUse());
}

/// Return true if node is an ISD::XOR of a M68kISD::SETCC and 1 and that the
/// SETCC node has a single use.
static bool isXor1OfSetCC(SDValue Op) {
  if (Op.getOpcode() != ISD::XOR)
    return false;
  if (isOneConstant(Op.getOperand(1)))
    return Op.getOperand(0).getOpcode() == M68kISD::SETCC &&
           Op.getOperand(0).hasOneUse();
  return false;
}

SDValue M68kTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  bool AddTest = true;
  SDValue Chain = Op.getOperand(0);
  SDValue Cond = Op.getOperand(1);
  SDValue Dest = Op.getOperand(2);
  SDLoc DL(Op);
  SDValue CC;
  bool Inverted = false;

  if (Cond.getOpcode() == ISD::SETCC) {
    // Check for setcc([su]{add,sub}o == 0).
    if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
        isNullConstant(Cond.getOperand(1)) &&
        Cond.getOperand(0).getResNo() == 1 &&
        (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
         Cond.getOperand(0).getOpcode() == ISD::UADDO ||
         Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
         Cond.getOperand(0).getOpcode() == ISD::USUBO)) {
      Inverted = true;
      Cond = Cond.getOperand(0);
    } else {
      if (SDValue NewCond = LowerSETCC(Cond, DAG))
        Cond = NewCond;
    }
  }

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == M68kISD::SETCC_CARRY &&
      isOneConstant(Cond.getOperand(1)))
    Cond = Cond.getOperand(0);

  // If condition flag is set by a M68kISD::CMP, then use it as the condition
  // setting operand in place of the M68kISD::SETCC.
  unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == M68kISD::SETCC || CondOpcode == M68kISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();

    if (isM68kLogicalCmp(Cmp) || Opc == M68kISD::BTST) {
      Cond = Cmp;
      AddTest = false;
    } else {
      switch (CC->getAsZExtVal()) {
      default:
        break;
      case M68k::COND_VS:
      case M68k::COND_CS:
        // These can only come from an arithmetic instruction with overflow,
        // e.g. SADDO, UADDO.
        Cond = Cond.getNode()->getOperand(1);
        AddTest = false;
        break;
      }
    }
  }
  CondOpcode = Cond.getOpcode();
  if (isOverflowArithmetic(CondOpcode)) {
    SDValue Result;
    unsigned CCode;
    lowerOverflowArithmetic(Cond, DAG, Result, Cond, CCode);

    if (Inverted)
      CCode = M68k::GetOppositeBranchCondition((M68k::CondCode)CCode);
    CC = DAG.getConstant(CCode, DL, MVT::i8);

    AddTest = false;
  } else {
    unsigned CondOpc;
    if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
      SDValue Cmp = Cond.getOperand(0).getOperand(1);
      if (CondOpc == ISD::OR) {
        // Also, recognize the pattern generated by an FCMP_UNE. We can emit
        // two branches instead of an explicit OR instruction with a
        // separate test.
        if (Cmp == Cond.getOperand(1).getOperand(1) && isM68kLogicalCmp(Cmp)) {
          CC = Cond.getOperand(0).getOperand(0);
          Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain,
                              Dest, CC, Cmp);
          CC = Cond.getOperand(1).getOperand(0);
          Cond = Cmp;
          AddTest = false;
        }
      } else { // ISD::AND
        // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
        // two branches instead of an explicit AND instruction with a
        // separate test. However, we only do this if this block doesn't
        // have a fall-through edge, because this requires an explicit
        // jmp when the condition is false.
        if (Cmp == Cond.getOperand(1).getOperand(1) && isM68kLogicalCmp(Cmp) &&
            Op.getNode()->hasOneUse()) {
          M68k::CondCode CCode =
              (M68k::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
          CCode = M68k::GetOppositeBranchCondition(CCode);
          CC = DAG.getConstant(CCode, DL, MVT::i8);
          SDNode *User = *Op.getNode()->use_begin();
          // Look for an unconditional branch following this conditional branch.
          // We need this because we need to reverse the successors in order
          // to implement FCMP_OEQ.
          if (User->getOpcode() == ISD::BR) {
            SDValue FalseBB = User->getOperand(1);
            SDNode *NewBR =
                DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
            assert(NewBR == User);
            (void)NewBR;
            Dest = FalseBB;

            Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain,
                                Dest, CC, Cmp);
            M68k::CondCode CCode =
                (M68k::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
            CCode = M68k::GetOppositeBranchCondition(CCode);
            CC = DAG.getConstant(CCode, DL, MVT::i8);
            Cond = Cmp;
            AddTest = false;
          }
        }
      }
    } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
      // Recognize xorb (setcc), 1 patterns. The xor inverts the condition.
      // It should be transformed during DAG combine except when the condition
      // is set by an arithmetic-with-overflow node.
      M68k::CondCode CCode =
          (M68k::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
      CCode = M68k::GetOppositeBranchCondition(CCode);
      CC = DAG.getConstant(CCode, DL, MVT::i8);
      Cond = Cond.getOperand(0).getOperand(1);
      AddTest = false;
    }
  }

  if (AddTest) {
    // Look past the truncate if the high bits are known zero.
    if (isTruncWithZeroHighBitsInput(Cond, DAG))
      Cond = Cond.getOperand(0);

    // We know the result is compared against zero. Try to match it to BT.
    if (Cond.hasOneUse()) {
      if (SDValue NewSetCC = LowerToBTST(Cond, ISD::SETNE, DL, DAG)) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        AddTest = false;
      }
    }
  }

  if (AddTest) {
    M68k::CondCode MxCond = Inverted ? M68k::COND_EQ : M68k::COND_NE;
    CC = DAG.getConstant(MxCond, DL, MVT::i8);
    Cond = EmitTest(Cond, MxCond, DL, DAG);
  }
  return DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain, Dest, CC,
                     Cond);
}

SDValue M68kTargetLowering::LowerADDC_ADDE_SUBC_SUBE(SDValue Op,
                                                     SelectionDAG &DAG) const {
  MVT VT = Op.getNode()->getSimpleValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  SDVTList VTs = DAG.getVTList(VT, MVT::i8);

  unsigned Opc;
  bool ExtraOp = false;
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Invalid code");
  case ISD::ADDC:
    Opc = M68kISD::ADD;
    break;
  case ISD::ADDE:
    Opc = M68kISD::ADDX;
    ExtraOp = true;
    break;
  case ISD::SUBC:
    Opc = M68kISD::SUB;
    break;
  case ISD::SUBE:
    Opc = M68kISD::SUBX;
    ExtraOp = true;
    break;
  }

  if (!ExtraOp)
    return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1));
  return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1),
                     Op.getOperand(2));
}
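
// Rough sketch of how this hook is reached (assumed, mirroring standard
// type legalization rather than code in this file): an i64 add is split as
//   %lo, %carry = ISD::ADDC %a.lo, %b.lo
//   %hi         = ISD::ADDE %a.hi, %b.hi, %carry
// and the mapping above turns the pair into M68kISD::ADD / M68kISD::ADDX,
// threading the carry through CCR.X.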

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the M68kISD::Wrapper node. Suppose N is
// one of the above-mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing modes. These wrapped nodes will be selected
// into MOV32ri.
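//
// For example (sketch): a constant-pool reference is emitted as
//   (M68kISD::Wrapper (TargetConstantPool <cst>))
// which the selector can match into MOV32ri, whereas a bare
// TargetConstantPool node would be returned unchanged by Select(N).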
SDValue M68kTargetLowering::LowerConstantPool(SDValue Op,
                                              SelectionDAG &DAG) const {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);

  // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);

  unsigned WrapperKind = M68kISD::Wrapper;
  if (M68kII::isPCRelGlobalReference(OpFlag)) {
    WrapperKind = M68kISD::WrapperPC;
  }

  MVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetConstantPool(
      CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset(), OpFlag);

  SDLoc DL(CP);
  Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  if (M68kII::isGlobalRelativeToPICBase(OpFlag)) {
    Result = DAG.getNode(ISD::ADD, DL, PtrVT,
                         DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
                         Result);
  }

  return Result;
}

SDValue M68kTargetLowering::LowerExternalSymbol(SDValue Op,
                                                SelectionDAG &DAG) const {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();

  // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
  // global base reg.
  const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
  unsigned char OpFlag = Subtarget.classifyExternalReference(*Mod);

  unsigned WrapperKind = M68kISD::Wrapper;
  if (M68kII::isPCRelGlobalReference(OpFlag)) {
    WrapperKind = M68kISD::WrapperPC;
  }

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT, OpFlag);

  SDLoc DL(Op);
  Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  if (M68kII::isGlobalRelativeToPICBase(OpFlag)) {
    Result = DAG.getNode(ISD::ADD, DL, PtrVT,
                         DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
                         Result);
  }

  // For symbols that require a load from a stub to get the address, emit the
  // load.
  if (M68kII::isGlobalStubReference(OpFlag)) {
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }

  return Result;
}

SDValue M68kTargetLowering::LowerBlockAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  unsigned char OpFlags = Subtarget.classifyBlockAddressReference();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
  SDLoc DL(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Create the TargetBlockAddress node.
  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);

  if (M68kII::isPCRelBlockReference(OpFlags)) {
    Result = DAG.getNode(M68kISD::WrapperPC, DL, PtrVT, Result);
  } else {
    Result = DAG.getNode(M68kISD::Wrapper, DL, PtrVT, Result);
  }

  // With PIC, the address is actually $g + Offset.
  if (M68kII::isGlobalRelativeToPICBase(OpFlags)) {
    Result =
        DAG.getNode(ISD::ADD, DL, PtrVT,
                    DAG.getNode(M68kISD::GLOBAL_BASE_REG, DL, PtrVT), Result);
  }

  return Result;
}

SDValue M68kTargetLowering::LowerGlobalAddress(const GlobalValue *GV,
                                               const SDLoc &DL, int64_t Offset,
                                               SelectionDAG &DAG) const {
  unsigned char OpFlags = Subtarget.classifyGlobalReference(GV);
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Create the TargetGlobalAddress node, folding in the constant
  // offset if it is legal.
  SDValue Result;
  if (M68kII::isDirectGlobalReference(OpFlags)) {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Offset);
    Offset = 0;
  } else {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
  }

  if (M68kII::isPCRelGlobalReference(OpFlags))
    Result = DAG.getNode(M68kISD::WrapperPC, DL, PtrVT, Result);
  else
    Result = DAG.getNode(M68kISD::Wrapper, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  if (M68kII::isGlobalRelativeToPICBase(OpFlags)) {
    Result =
        DAG.getNode(ISD::ADD, DL, PtrVT,
                    DAG.getNode(M68kISD::GLOBAL_BASE_REG, DL, PtrVT), Result);
  }

  // For globals that require a load from a stub to get the address, emit the
  // load.
  if (M68kII::isGlobalStubReference(OpFlags)) {
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0) {
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, DL, PtrVT));
  }

  return Result;
}
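
// Sketch of the fully general PIC case handled above (illustrative): for an
// external global G the DAG built is roughly
//   %base = M68kISD::GLOBAL_BASE_REG
//   %slot = ISD::ADD %base, (M68kISD::Wrapper G@GOT)
//   %addr = load %slot                      ; GOT stub entry
// followed by one more ISD::ADD for any constant offset that could not be
// folded into the TargetGlobalAddress node.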

SDValue M68kTargetLowering::LowerGlobalAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
  return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
}

//===----------------------------------------------------------------------===//
// Custom Lower Jump Table
//===----------------------------------------------------------------------===//

SDValue M68kTargetLowering::LowerJumpTable(SDValue Op,
                                           SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);

  unsigned WrapperKind = M68kISD::Wrapper;
  if (M68kII::isPCRelGlobalReference(OpFlag)) {
    WrapperKind = M68kISD::WrapperPC;
  }

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
  SDLoc DL(JT);
  Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  if (M68kII::isGlobalRelativeToPICBase(OpFlag)) {
    Result = DAG.getNode(ISD::ADD, DL, PtrVT,
                         DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
                         Result);
  }

  return Result;
}

unsigned M68kTargetLowering::getJumpTableEncoding() const {
  return Subtarget.getJumpTableEncoding();
}

const MCExpr *M68kTargetLowering::LowerCustomJumpTableEntry(
    const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
    unsigned uid, MCContext &Ctx) const {
  return MCSymbolRefExpr::create(MBB->getSymbol(), MCSymbolRefExpr::VK_GOTOFF,
                                 Ctx);
}
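
// With the custom (EK_Custom32) encoding above, each jump-table entry is a
// GOT-relative word, e.g. (sketch) ".long .LBB0_7@GOTOFF"; adding the
// GLOBAL_BASE_REG value returned by getPICJumpTableRelocBase below then
// yields the absolute address of the target block.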

SDValue M68kTargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                     SelectionDAG &DAG) const {
  if (getJumpTableEncoding() == MachineJumpTableInfo::EK_Custom32)
    return DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(),
                       getPointerTy(DAG.getDataLayout()));

  // MachineJumpTableInfo::EK_LabelDifference32 entry
  return Table;
}

// NOTE: This is only used for MachineJumpTableInfo::EK_LabelDifference32
// entries.
const MCExpr *M68kTargetLowering::getPICJumpTableRelocBaseExpr(
    const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const {
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}

M68kTargetLowering::ConstraintType
M68kTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() > 0) {
    switch (Constraint[0]) {
    case 'a':
    case 'd':
      return C_RegisterClass;
    case 'I':
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'O':
    case 'P':
      return C_Immediate;
    case 'C':
      if (Constraint.size() == 2)
        switch (Constraint[1]) {
        case '0':
        case 'i':
        case 'j':
          return C_Immediate;
        default:
          break;
        }
      break;
    case 'Q':
    case 'U':
      return C_Memory;
    default:
      break;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}
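
// Illustrative use from C (an assumption, matching the checks in
// LowerAsmOperandForConstraint below):
//   asm("lsl.l %1,%0" : "+d"(x) : "I"(4));
// 'd' pins x to a data register and 'I' only accepts a shift count in the
// range [1,8], so the constant can be encoded directly in the instruction.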

void M68kTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                      StringRef Constraint,
                                                      std::vector<SDValue> &Ops,
                                                      SelectionDAG &DAG) const {
  SDValue Result;

  if (Constraint.size() == 1) {
    // Constant constraints
    switch (Constraint[0]) {
    case 'I':
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'O':
    case 'P': {
      auto *C = dyn_cast<ConstantSDNode>(Op);
      if (!C)
        return;

      int64_t Val = C->getSExtValue();
      switch (Constraint[0]) {
      case 'I': // constant integer in the range [1,8]
        if (Val > 0 && Val <= 8)
          break;
        return;
      case 'J': // constant signed 16-bit integer
        if (isInt<16>(Val))
          break;
        return;
      case 'K': // constant that is NOT in the range of [-0x80, 0x80)
        if (Val < -0x80 || Val >= 0x80)
          break;
        return;
      case 'L': // constant integer in the range [-8,-1]
        if (Val < 0 && Val >= -8)
          break;
        return;
      case 'M': // constant that is NOT in the range of [-0x100, 0x100)
        if (Val < -0x100 || Val >= 0x100)
          break;
        return;
      case 'N': // constant integer in the range [24,31]
        if (Val >= 24 && Val <= 31)
          break;
        return;
      case 'O': // constant integer 16
        if (Val == 16)
          break;
        return;
      case 'P': // constant integer in the range [8,15]
        if (Val >= 8 && Val <= 15)
          break;
        return;
      default:
        llvm_unreachable("Unhandled constant constraint");
      }

      Result = DAG.getTargetConstant(Val, SDLoc(Op), Op.getValueType());
      break;
    }
    default:
      break;
    }
  }

  if (Constraint.size() == 2) {
    switch (Constraint[0]) {
    case 'C':
      // Constant constraints start with 'C'
      switch (Constraint[1]) {
      case '0':
      case 'i':
      case 'j': {
        auto *C = dyn_cast<ConstantSDNode>(Op);
        if (!C)
          break;

        int64_t Val = C->getSExtValue();
        switch (Constraint[1]) {
        case '0': // constant integer 0
          if (!Val)
            break;
          return;
        case 'i': // constant integer
          break;
        case 'j': // integer constant that doesn't fit in 16 bits
          if (!isInt<16>(C->getSExtValue()))
            break;
          return;
        default:
          llvm_unreachable("Unhandled constant constraint");
        }

        Result = DAG.getTargetConstant(Val, SDLoc(Op), Op.getValueType());
        break;
      }
      default:
        break;
      }
      break;
    default:
      break;
    }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass *>
M68kTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                 StringRef Constraint,
                                                 MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
    case 'd':
      switch (VT.SimpleTy) {
      case MVT::i8:
        return std::make_pair(0U, &M68k::DR8RegClass);
      case MVT::i16:
        return std::make_pair(0U, &M68k::DR16RegClass);
      case MVT::i32:
        return std::make_pair(0U, &M68k::DR32RegClass);
      default:
        break;
      }
      break;
    case 'a':
      switch (VT.SimpleTy) {
      case MVT::i16:
        return std::make_pair(0U, &M68k::AR16RegClass);
      case MVT::i32:
        return std::make_pair(0U, &M68k::AR32RegClass);
      default:
        break;
      }
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

/// Determines whether the callee is required to pop its own arguments.
/// Callee pop is necessary to support tail calls.
bool M68k::isCalleePop(CallingConv::ID CC, bool IsVarArg, bool GuaranteeTCO) {
  return CC == CallingConv::M68k_RTD && !IsVarArg;
}

// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
// together with other CMOV pseudo-opcodes into a single basic-block with
// conditional jump around it.
static bool isCMOVPseudo(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case M68k::CMOV8d:
  case M68k::CMOV16d:
  case M68k::CMOV32r:
    return true;

  default:
    return false;
  }
}

// The CCR operand of SelectItr might be missing a kill marker
// because there were multiple uses of CCR, and ISel didn't know
// which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
static bool checkAndUpdateCCRKill(MachineBasicBlock::iterator SelectItr,
                                  MachineBasicBlock *BB,
                                  const TargetRegisterInfo *TRI) {
  // Scan forward through BB for a use/def of CCR.
  MachineBasicBlock::iterator miI(std::next(SelectItr));
  for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
    const MachineInstr &mi = *miI;
    if (mi.readsRegister(M68k::CCR, /*TRI=*/nullptr))
      return false;
    if (mi.definesRegister(M68k::CCR, /*TRI=*/nullptr))
      break; // Should have kill-flag - update below.
  }

  // If we hit the end of the block, check whether CCR is live into a
  // successor.
  if (miI == BB->end())
    for (const auto *SBB : BB->successors())
      if (SBB->isLiveIn(M68k::CCR))
        return false;

  // We found a def, or hit the end of the basic block and CCR wasn't live
  // out. SelectMI should have a kill flag on CCR.
  SelectItr->addRegisterKilled(M68k::CCR, TRI);
  return true;
}
3096
3097
MachineBasicBlock *
3098
M68kTargetLowering::EmitLoweredSelect(MachineInstr &MI,
3099
MachineBasicBlock *MBB) const {
3100
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
3101
DebugLoc DL = MI.getDebugLoc();
3102
3103
// To "insert" a SELECT_CC instruction, we actually have to insert the
3104
// diamond control-flow pattern. The incoming instruction knows the
3105
// destination vreg to set, the condition code register to branch on, the
3106
// true/false values to select between, and a branch opcode to use.
3107
const BasicBlock *BB = MBB->getBasicBlock();
3108
MachineFunction::iterator It = ++MBB->getIterator();
3109
3110
// ThisMBB:
3111
// ...
3112
// TrueVal = ...
3113
// cmp ccX, r1, r2
3114
// bcc Copy1MBB
3115
// fallthrough --> Copy0MBB
3116
MachineBasicBlock *ThisMBB = MBB;
3117
MachineFunction *F = MBB->getParent();
3118
3119
// This code lowers all pseudo-CMOV instructions. Generally it lowers these
3120
// as described above, by inserting a MBB, and then making a PHI at the join
3121
// point to select the true and false operands of the CMOV in the PHI.
3122
//
3123
// The code also handles two different cases of multiple CMOV opcodes
3124
// in a row.
3125
//
3126
// Case 1:
3127
// In this case, there are multiple CMOVs in a row, all which are based on
3128
// the same condition setting (or the exact opposite condition setting).
3129
// In this case we can lower all the CMOVs using a single inserted MBB, and
3130
// then make a number of PHIs at the join point to model the CMOVs. The only
3131
// trickiness here, is that in a case like:
3132
//
3133
// t2 = CMOV cond1 t1, f1
3134
// t3 = CMOV cond1 t2, f2
3135
//
3136
// when rewriting this into PHIs, we have to perform some renaming on the
3137
// temps since you cannot have a PHI operand refer to a PHI result earlier
3138
// in the same block. The "simple" but wrong lowering would be:
3139
//
3140
// t2 = PHI t1(BB1), f1(BB2)
3141
// t3 = PHI t2(BB1), f2(BB2)
3142
//
3143
// but clearly t2 is not defined in BB1, so that is incorrect. The proper
3144
// renaming is to note that on the path through BB1, t2 is really just a
3145
// copy of t1, and do that renaming, properly generating:
3146
//
3147
// t2 = PHI t1(BB1), f1(BB2)
3148
// t3 = PHI t1(BB1), f2(BB2)
3149
//
3150
// Case 2, we lower cascaded CMOVs such as
3151
//
3152
// (CMOV (CMOV F, T, cc1), T, cc2)
3153
//
3154
// to two successives branches.
3155
  MachineInstr *CascadedCMOV = nullptr;
  MachineInstr *LastCMOV = &MI;
  M68k::CondCode CC = M68k::CondCode(MI.getOperand(3).getImm());
  M68k::CondCode OppCC = M68k::GetOppositeBranchCondition(CC);
  MachineBasicBlock::iterator NextMIIt =
      std::next(MachineBasicBlock::iterator(MI));

  // Check first for case 1, where there are multiple CMOVs with the same
  // condition. Of the two cases of multiple CMOV lowerings, case 1 reduces
  // the number of jumps the most.

  if (isCMOVPseudo(MI)) {
    // See if we have a string of CMOVs with the same condition.
    while (NextMIIt != MBB->end() && isCMOVPseudo(*NextMIIt) &&
           (NextMIIt->getOperand(3).getImm() == CC ||
            NextMIIt->getOperand(3).getImm() == OppCC)) {
      LastCMOV = &*NextMIIt;
      ++NextMIIt;
    }
  }

  // Check for case 2, but only if we didn't already find case 1, as
  // indicated by LastCMOV == &MI.
  if (LastCMOV == &MI && NextMIIt != MBB->end() &&
      NextMIIt->getOpcode() == MI.getOpcode() &&
      NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
      NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
      NextMIIt->getOperand(1).isKill()) {
    CascadedCMOV = &*NextMIIt;
  }

  MachineBasicBlock *Jcc1MBB = nullptr;

  // If we have a cascaded CMOV, we lower it to two successive branches to
  // the same block. CCR is used by both, so mark it as live in the second.
  if (CascadedCMOV) {
    Jcc1MBB = F->CreateMachineBasicBlock(BB);
    F->insert(It, Jcc1MBB);
    Jcc1MBB->addLiveIn(M68k::CCR);
  }

  MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(BB);
  F->insert(It, Copy0MBB);
  F->insert(It, SinkMBB);

  // Set the call frame size on entry to the new basic blocks.
  unsigned CallFrameSize = TII->getCallFrameSizeAt(MI);
  Copy0MBB->setCallFrameSize(CallFrameSize);
  SinkMBB->setCallFrameSize(CallFrameSize);

  // If the CCR register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  MachineInstr *LastCCRSUser = CascadedCMOV ? CascadedCMOV : LastCMOV;
  if (!LastCCRSUser->killsRegister(M68k::CCR, /*TRI=*/nullptr) &&
      !checkAndUpdateCCRKill(LastCCRSUser, MBB, TRI)) {
    Copy0MBB->addLiveIn(M68k::CCR);
    SinkMBB->addLiveIn(M68k::CCR);
  }

  // Transfer the remainder of MBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(LastCMOV)),
                  MBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Add the true and fallthrough blocks as its successors.
  if (CascadedCMOV) {
    // The fallthrough block may be Jcc1MBB, if we have a cascaded CMOV.
    MBB->addSuccessor(Jcc1MBB);

    // In that case, Jcc1MBB will itself fall through to Copy0MBB, and
    // jump to SinkMBB.
    Jcc1MBB->addSuccessor(Copy0MBB);
    Jcc1MBB->addSuccessor(SinkMBB);
  } else {
    MBB->addSuccessor(Copy0MBB);
  }

  // The true block target of the first (or only) branch is always SinkMBB.
  MBB->addSuccessor(SinkMBB);

  // Create the conditional branch instruction.
  unsigned Opc = M68k::GetCondBranchFromCond(CC);
  BuildMI(MBB, DL, TII->get(Opc)).addMBB(SinkMBB);

  if (CascadedCMOV) {
    unsigned Opc2 = M68k::GetCondBranchFromCond(
        (M68k::CondCode)CascadedCMOV->getOperand(3).getImm());
    BuildMI(Jcc1MBB, DL, TII->get(Opc2)).addMBB(SinkMBB);
  }

  // Copy0MBB:
  //  %FalseValue = ...
  //  # fallthrough to SinkMBB
  Copy0MBB->addSuccessor(SinkMBB);

  // SinkMBB:
  //  %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
  //  ...
  MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
  MachineBasicBlock::iterator MIItEnd =
      std::next(MachineBasicBlock::iterator(LastCMOV));
  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
  DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
  MachineInstrBuilder MIB;

  // As we are creating the PHIs, we have to be careful if there is more than
  // one. Later CMOVs may reference the results of earlier CMOVs, but later
  // PHIs have to reference the individual true/false inputs from earlier
  // PHIs. That also means that PHI construction must work forward from
  // earlier to later, and that the code must maintain a mapping from each
  // earlier PHI's destination register to the registers that went into the
  // PHI.
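  //
  // For the t2/t3 example above (illustrative): after emitting
  //  t2 = PHI t1(Copy0MBB), f1(ThisMBB)
  // the table holds RegRewriteTable[t2] = (t1, f1); if t3 then names t2 as
  // its first (resp. second) PHI input, it is rewritten to t1 (resp. f1).
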
  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
    Register DestReg = MIIt->getOperand(0).getReg();
    Register Op1Reg = MIIt->getOperand(1).getReg();
    Register Op2Reg = MIIt->getOperand(2).getReg();

    // If the CMOV we are generating has the opposite condition from the jump
    // we generated, then we have to swap the operands for the PHI that is
    // going to be generated.
    if (MIIt->getOperand(3).getImm() == OppCC)
      std::swap(Op1Reg, Op2Reg);

    if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
      Op1Reg = RegRewriteTable[Op1Reg].first;

    if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
      Op2Reg = RegRewriteTable[Op2Reg].second;

    MIB =
        BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(M68k::PHI), DestReg)
            .addReg(Op1Reg)
            .addMBB(Copy0MBB)
            .addReg(Op2Reg)
            .addMBB(ThisMBB);

    // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
  }

  // If we have a cascaded CMOV, the second Jcc provides the same incoming
  // value as the first Jcc (the True operand of the SELECT_CC/CMOV nodes).
  if (CascadedCMOV) {
    MIB.addReg(MI.getOperand(2).getReg()).addMBB(Jcc1MBB);
    // Copy the PHI result to the register defined by the second CMOV.
    BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())),
            DL, TII->get(TargetOpcode::COPY),
            CascadedCMOV->getOperand(0).getReg())
        .addReg(MI.getOperand(0).getReg());
    CascadedCMOV->eraseFromParent();
  }

  // Now remove the CMOV(s).
  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd;)
    (MIIt++)->eraseFromParent();

  return SinkMBB;
}

MachineBasicBlock *
M68kTargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
                                         MachineBasicBlock *BB) const {
  llvm_unreachable("Cannot lower Segmented Stack Alloca with stack-split on");
}

MachineBasicBlock *
M68kTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case M68k::CMOV8d:
  case M68k::CMOV16d:
  case M68k::CMOV32r:
    return EmitLoweredSelect(MI, BB);
  case M68k::SALLOCA:
    return EmitLoweredSegAlloca(MI, BB);
  }
}

SDValue M68kTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto PtrVT = getPointerTy(MF.getDataLayout());
  M68kMachineFunctionInfo *FuncInfo = MF.getInfo<M68kMachineFunctionInfo>();

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

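// For example, IR like the following (a generic sketch of how va_start
// reaches this hook, not a test from this backend):
//
//   %ap = alloca ptr
//   call void @llvm.va_start(ptr %ap)
//
// becomes a single store into %ap of the frame index returned by
// M68kMachineFunctionInfo::getVarArgsFrameIndex().
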
SDValue M68kTargetLowering::LowerATOMICFENCE(SDValue Op,
                                             SelectionDAG &DAG) const {
  // Lower to a memory barrier created from inline asm.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LLVMContext &Ctx = *DAG.getContext();

  const unsigned Flags = InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore |
                         InlineAsm::Extra_HasSideEffects;
  const SDValue AsmOperands[4] = {
      Op.getOperand(0), // Input chain
      DAG.getTargetExternalSymbol(
          "", TLI.getProgramPointerTy(
                  DAG.getDataLayout())),   // Empty inline asm string
      DAG.getMDNode(MDNode::get(Ctx, {})), // (empty) srcloc
      DAG.getTargetConstant(Flags, SDLoc(Op),
                            TLI.getPointerTy(DAG.getDataLayout())), // Flags
  };

  return DAG.getNode(ISD::INLINEASM, SDLoc(Op),
                     DAG.getVTList(MVT::Other, MVT::Glue), AsmOperands);
}

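// The node built above is, in effect, the DAG-level equivalent of the classic
// compiler-barrier idiom (shown only as an analogy, not code emitted by this
// lowering):
//
//   asm volatile("" ::: "memory");
//
// i.e. an empty asm string marked MayLoad/MayStore/HasSideEffects, so no
// memory operation may be reordered across it.
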
// Lower dynamic stack allocation. When split stacks are enabled, this emits
// a SEG_ALLOCA pseudo that the segmented-stack machinery expands later;
// otherwise the allocation simply adjusts the stack pointer, rounding the
// result down when the requested alignment exceeds the stack alignment.
SDValue M68kTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                    SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool SplitStack = MF.shouldSplitStack();

  SDLoc DL(Op);

  // Get the inputs.
  SDNode *Node = Op.getNode();
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  unsigned Align = Op.getConstantOperandVal(2);
  EVT VT = Node->getValueType(0);

  // Chain the dynamic stack allocation so that it doesn't modify the stack
  // pointer when other instructions are using the stack.
  Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);

  SDValue Result;
  if (SplitStack) {
    auto &MRI = MF.getRegInfo();
    auto SPTy = getPointerTy(DAG.getDataLayout());
    auto *ARClass = getRegClassFor(SPTy);
    Register Vreg = MRI.createVirtualRegister(ARClass);
    Chain = DAG.getCopyToReg(Chain, DL, Vreg, Size);
    Result = DAG.getNode(M68kISD::SEG_ALLOCA, DL, SPTy, Chain,
                         DAG.getRegister(Vreg, SPTy));
  } else {
    auto &TLI = DAG.getTargetLoweringInfo();
    Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
    assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
                    " not tell us which reg is the stack pointer!");

    SDValue SP = DAG.getCopyFromReg(Chain, DL, SPReg, VT);
    Chain = SP.getValue(1);
    const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
    unsigned StackAlign = TFI.getStackAlignment();
    Result = DAG.getNode(ISD::SUB, DL, VT, SP, Size); // Value
    if (Align > StackAlign)
      Result = DAG.getNode(ISD::AND, DL, VT, Result,
                           DAG.getConstant(-(uint64_t)Align, DL, VT));
    Chain = DAG.getCopyToReg(Chain, DL, SPReg, Result); // Output chain
  }

  Chain = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), DL);

  SDValue Ops[2] = {Result, Chain};
  return DAG.getMergeValues(Ops, DL);
}

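// Worked example for the non-split-stack path (hypothetical values, purely
// for illustration): with SP = 0x2000, Size = 24, Align = 16, and a smaller
// stack alignment, Result = 0x2000 - 24 = 0x1FE8, and the AND with -16
// (0xFFFFFFF0) rounds that down to 0x1FE0, which becomes both the returned
// pointer and the new SP.
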
SDValue M68kTargetLowering::LowerShiftLeftParts(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // if Shamt - register size < 0: // Shamt < register size
  //   Lo = Lo << Shamt
  //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u ((register size - 1) ^ Shamt))
  // else:
  //   Lo = 0
  //   Hi = Lo << (Shamt - register size)  // using the incoming Lo

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusRegisterSize = DAG.getConstant(-32, DL, VT);
  SDValue RegisterSizeMinus1 = DAG.getConstant(32 - 1, DL, VT);
  SDValue ShamtMinusRegisterSize =
      DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize);
  SDValue RegisterSizeMinus1Shamt =
      DAG.getNode(ISD::XOR, DL, VT, RegisterSizeMinus1, Shamt);

  SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
  SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
  SDValue ShiftRightLo =
      DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, RegisterSizeMinus1Shamt);
  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
  SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
  SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusRegisterSize);

  SDValue CC =
      DAG.getSetCC(DL, MVT::i8, ShamtMinusRegisterSize, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  return DAG.getMergeValues({Lo, Hi}, DL);
}

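// Sanity check with concrete shift amounts (assuming 32-bit parts; purely
// illustrative):
//  - Shamt = 8:  Lo' = Lo << 8; since 31 ^ 8 = 23, Hi' = (Hi << 8) |
//    ((Lo >>u 1) >>u 23) = (Hi << 8) | (Lo >>u 24), i.e. the top 8 bits of
//    Lo move into the bottom of Hi. The (>>u 1) pre-shift keeps each shift
//    amount in [0, 31] even when Shamt = 0.
//  - Shamt = 40: Shamt - 32 = 8 >= 0, so Lo' = 0 and Hi' = Lo << 8.
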
SDValue M68kTargetLowering::LowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
                                                 bool IsSRA) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // SRA expansion:
  //   if Shamt - register size < 0: // Shamt < register size
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << ((register size - 1) ^ Shamt))
  //     Hi = Hi >>s Shamt
  //   else:
  //     Lo = Hi >>s (Shamt - register size)
  //     Hi = Hi >>s (register size - 1)
  //
  // SRL expansion:
  //   if Shamt - register size < 0: // Shamt < register size
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << ((register size - 1) ^ Shamt))
  //     Hi = Hi >>u Shamt
  //   else:
  //     Lo = Hi >>u (Shamt - register size)
  //     Hi = 0

  unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusRegisterSize = DAG.getConstant(-32, DL, VT);
  SDValue RegisterSizeMinus1 = DAG.getConstant(32 - 1, DL, VT);
  SDValue ShamtMinusRegisterSize =
      DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize);
  SDValue RegisterSizeMinus1Shamt =
      DAG.getNode(ISD::XOR, DL, VT, RegisterSizeMinus1, Shamt);

  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
  SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
  SDValue ShiftLeftHi =
      DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, RegisterSizeMinus1Shamt);
  SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
  SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
  SDValue LoFalse =
      DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusRegisterSize);
  SDValue HiFalse =
      IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, RegisterSizeMinus1) : Zero;

  SDValue CC =
      DAG.getSetCC(DL, MVT::i8, ShamtMinusRegisterSize, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  return DAG.getMergeValues({Lo, Hi}, DL);
}

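// Concrete check of both branches (assuming 32-bit parts; illustrative only):
// for Shamt = 40, Lo' = Hi >> 8, and Hi' is either Hi >>s 31 (SRA: all
// copies of the sign bit) or 0 (SRL). For Shamt = 8, 31 ^ 8 = 23, so
// Lo' = (Lo >>u 8) | ((Hi << 1) << 23) = (Lo >>u 8) | (Hi << 24), moving
// the low 8 bits of Hi into the top of Lo.
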
//===----------------------------------------------------------------------===//
// DAG Combine
//===----------------------------------------------------------------------===//

static SDValue getSETCC(M68k::CondCode Cond, SDValue CCR, const SDLoc &dl,
                        SelectionDAG &DAG) {
  return DAG.getNode(M68kISD::SETCC, dl, MVT::i8,
                     DAG.getConstant(Cond, dl, MVT::i8), CCR);
}

// When legalizing carry, we create carries via "add X, -1".
// If that comes from an actual carry, produced by a setcc, we use the
// carry directly.
static SDValue combineCarryThroughADD(SDValue CCR) {
  if (CCR.getOpcode() == M68kISD::ADD) {
    if (isAllOnesConstant(CCR.getOperand(1))) {
      SDValue Carry = CCR.getOperand(0);
      while (Carry.getOpcode() == ISD::TRUNCATE ||
             Carry.getOpcode() == ISD::ZERO_EXTEND ||
             Carry.getOpcode() == ISD::SIGN_EXTEND ||
             Carry.getOpcode() == ISD::ANY_EXTEND ||
             (Carry.getOpcode() == ISD::AND &&
              isOneConstant(Carry.getOperand(1))))
        Carry = Carry.getOperand(0);
      if (Carry.getOpcode() == M68kISD::SETCC ||
          Carry.getOpcode() == M68kISD::SETCC_CARRY) {
        if (Carry.getConstantOperandVal(0) == M68k::COND_CS)
          return Carry.getOperand(1);
      }
    }
  }

  return SDValue();
}

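// In DAG terms, the pattern recognized above looks like this (a schematic,
// hand-written example):
//
//   t1: i8  = M68kISD::SETCC COND_CS, t0   ; t0 is the CCR value
//   t2: i32 = any_extend t1
//   t3: i32 = M68kISD::ADD t2, -1          ; carry materialized as an add
//
// combineCarryThroughADD(t3) walks back through the extension to t1 and
// returns t0, letting ADDX/SUBX consume the original carry directly.
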
/// Optimize a CCR definition used according to the condition code \p CC into
/// a simpler CCR value, potentially returning a new \p CC and replacing uses
/// of chain values.
static SDValue combineSetCCCCR(SDValue CCR, M68k::CondCode &CC,
                               SelectionDAG &DAG,
                               const M68kSubtarget &Subtarget) {
  if (CC == M68k::COND_CS)
    if (SDValue Flags = combineCarryThroughADD(CCR))
      return Flags;

  return SDValue();
}

// Optimize RES = M68kISD::SETCC CONDCODE, CCR_INPUT
static SDValue combineM68kSetCC(SDNode *N, SelectionDAG &DAG,
                                const M68kSubtarget &Subtarget) {
  SDLoc DL(N);
  M68k::CondCode CC = M68k::CondCode(N->getConstantOperandVal(0));
  SDValue CCR = N->getOperand(1);

  // Try to simplify the CCR and condition code operands.
  if (SDValue Flags = combineSetCCCCR(CCR, CC, DAG, Subtarget))
    return getSETCC(CC, Flags, DL, DAG);

  return SDValue();
}

static SDValue combineM68kBrCond(SDNode *N, SelectionDAG &DAG,
                                 const M68kSubtarget &Subtarget) {
  SDLoc DL(N);
  M68k::CondCode CC = M68k::CondCode(N->getConstantOperandVal(2));
  SDValue CCR = N->getOperand(3);

  // Try to simplify the CCR and condition code operands.
  // Make sure to not keep references to operands, as combineSetCCCCR can
  // RAUW them under us.
  if (SDValue Flags = combineSetCCCCR(CCR, CC, DAG, Subtarget)) {
    SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
    return DAG.getNode(M68kISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
                       N->getOperand(1), Cond, Flags);
  }

  return SDValue();
}

static SDValue combineSUBX(SDNode *N, SelectionDAG &DAG) {
  if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
    MVT VT = N->getSimpleValueType(0);
    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    return DAG.getNode(M68kISD::SUBX, SDLoc(N), VTs, N->getOperand(0),
                       N->getOperand(1), Flags);
  }

  return SDValue();
}

// Optimize RES, CCR = M68kISD::ADDX LHS, RHS, CCR
static SDValue combineADDX(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI) {
  if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
    MVT VT = N->getSimpleValueType(0);
    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    return DAG.getNode(M68kISD::ADDX, SDLoc(N), VTs, N->getOperand(0),
                       N->getOperand(1), Flags);
  }

  return SDValue();
}

SDValue M68kTargetLowering::PerformDAGCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  case M68kISD::SUBX:
    return combineSUBX(N, DAG);
  case M68kISD::ADDX:
    return combineADDX(N, DAG, DCI);
  case M68kISD::SETCC:
    return combineM68kSetCC(N, DAG, Subtarget);
  case M68kISD::BRCOND:
    return combineM68kBrCond(N, DAG, Subtarget);
  }

  return SDValue();
}

//===----------------------------------------------------------------------===//
// M68kISD Node Names
//===----------------------------------------------------------------------===//
const char *M68kTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  case M68kISD::CALL:
    return "M68kISD::CALL";
  case M68kISD::TAIL_CALL:
    return "M68kISD::TAIL_CALL";
  case M68kISD::RET:
    return "M68kISD::RET";
  case M68kISD::TC_RETURN:
    return "M68kISD::TC_RETURN";
  case M68kISD::ADD:
    return "M68kISD::ADD";
  case M68kISD::SUB:
    return "M68kISD::SUB";
  case M68kISD::ADDX:
    return "M68kISD::ADDX";
  case M68kISD::SUBX:
    return "M68kISD::SUBX";
  case M68kISD::SMUL:
    return "M68kISD::SMUL";
  case M68kISD::UMUL:
    return "M68kISD::UMUL";
  case M68kISD::OR:
    return "M68kISD::OR";
  case M68kISD::XOR:
    return "M68kISD::XOR";
  case M68kISD::AND:
    return "M68kISD::AND";
  case M68kISD::CMP:
    return "M68kISD::CMP";
  case M68kISD::BTST:
    return "M68kISD::BTST";
  case M68kISD::SELECT:
    return "M68kISD::SELECT";
  case M68kISD::CMOV:
    return "M68kISD::CMOV";
  case M68kISD::BRCOND:
    return "M68kISD::BRCOND";
  case M68kISD::SETCC:
    return "M68kISD::SETCC";
  case M68kISD::SETCC_CARRY:
    return "M68kISD::SETCC_CARRY";
  case M68kISD::GLOBAL_BASE_REG:
    return "M68kISD::GLOBAL_BASE_REG";
  case M68kISD::Wrapper:
    return "M68kISD::Wrapper";
  case M68kISD::WrapperPC:
    return "M68kISD::WrapperPC";
  case M68kISD::SEG_ALLOCA:
    return "M68kISD::SEG_ALLOCA";
  default:
    return nullptr;
  }
}

CCAssignFn *M68kTargetLowering::getCCAssignFn(CallingConv::ID CC, bool Return,
                                              bool IsVarArg) const {
  if (Return)
    return RetCC_M68k_C;
  return CC_M68k_C;
}