GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
//===- ARMInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for ARM.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "ARMRegisterBankInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "arm-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class ARMInstructionSelector : public InstructionSelector {
public:
  ARMInstructionSelector(const ARMBaseTargetMachine &TM, const ARMSubtarget &STI,
                         const ARMRegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  struct CmpConstants;
  struct InsertInfo;

  bool selectCmp(CmpConstants Helper, MachineInstrBuilder &MIB,
                 MachineRegisterInfo &MRI) const;

  // Helper for inserting a comparison sequence that sets \p ResReg to either 1
  // if \p LHSReg and \p RHSReg are in the relationship defined by \p Cond, or
  // \p PrevRes otherwise. In essence, it computes PrevRes OR (LHS Cond RHS).
  bool insertComparison(CmpConstants Helper, InsertInfo I, unsigned ResReg,
                        ARMCC::CondCodes Cond, unsigned LHSReg, unsigned RHSReg,
                        unsigned PrevRes) const;

  // Set \p DestReg to \p Constant.
  void putConstant(InsertInfo I, unsigned DestReg, unsigned Constant) const;

  bool selectGlobal(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const;
  bool selectSelect(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const;
  bool selectShift(unsigned ShiftOpc, MachineInstrBuilder &MIB) const;

  // Check if the types match and both operands have the expected size and
  // register bank.
  bool validOpRegPair(MachineRegisterInfo &MRI, unsigned LHS, unsigned RHS,
                      unsigned ExpectedSize, unsigned ExpectedRegBankID) const;

  // Check if the register has the expected size and register bank.
  bool validReg(MachineRegisterInfo &MRI, unsigned Reg, unsigned ExpectedSize,
                unsigned ExpectedRegBankID) const;

  const ARMBaseInstrInfo &TII;
  const ARMBaseRegisterInfo &TRI;
  const ARMBaseTargetMachine &TM;
  const ARMRegisterBankInfo &RBI;
  const ARMSubtarget &STI;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. If we want to reuse some of
  // the custom C++ predicates written for DAGISel, we need to have both around.
  const ARMSubtarget *Subtarget = &STI;

  // Store the opcodes that we might need, so we don't have to check what kind
  // of subtarget (ARM vs Thumb) we have all the time.
  struct OpcodeCache {
    unsigned ZEXT16;
    unsigned SEXT16;

    unsigned ZEXT8;
    unsigned SEXT8;

    // Used for implementing ZEXT/SEXT from i1
    unsigned AND;
    unsigned RSB;

    unsigned STORE32;
    unsigned LOAD32;

    unsigned STORE16;
    unsigned LOAD16;

    unsigned STORE8;
    unsigned LOAD8;

    unsigned ADDrr;
    unsigned ADDri;

    // Used for G_ICMP
    unsigned CMPrr;
    unsigned MOVi;
    unsigned MOVCCi;

    // Used for G_SELECT
    unsigned MOVCCr;

    unsigned TSTri;
    unsigned Bcc;

    // Used for G_GLOBAL_VALUE
    unsigned MOVi32imm;
    unsigned ConstPoolLoad;
    unsigned MOV_ga_pcrel;
    unsigned LDRLIT_ga_pcrel;
    unsigned LDRLIT_ga_abs;

    OpcodeCache(const ARMSubtarget &STI);
  } const Opcodes;

  // Select the opcode for simple extensions (that translate to a single SXT/UXT
  // instruction). Extension operations more complicated than that should not
  // invoke this. Returns the original opcode if it doesn't know how to select a
  // better one.
  unsigned selectSimpleExtOpc(unsigned Opc, unsigned Size) const;

  // Select the opcode for simple loads and stores. Returns the original opcode
  // if it doesn't know how to select a better one.
  unsigned selectLoadStoreOpCode(unsigned Opc, unsigned RegBank,
                                 unsigned Size) const;

  void renderVFPF32Imm(MachineInstrBuilder &New, const MachineInstr &Old,
                       int OpIdx = -1) const;
  void renderVFPF64Imm(MachineInstrBuilder &New, const MachineInstr &Old,
                       int OpIdx = -1) const;
  void renderInvertedImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                         int OpIdx = -1) const;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

  // We declare the temporaries used by selectImpl() in the class to minimize the
  // cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};
} // end anonymous namespace

namespace llvm {
InstructionSelector *
createARMInstructionSelector(const ARMBaseTargetMachine &TM,
                             const ARMSubtarget &STI,
                             const ARMRegisterBankInfo &RBI) {
  return new ARMInstructionSelector(TM, STI, RBI);
}
}

#define GET_GLOBALISEL_IMPL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

ARMInstructionSelector::ARMInstructionSelector(const ARMBaseTargetMachine &TM,
                                               const ARMSubtarget &STI,
                                               const ARMRegisterBankInfo &RBI)
    : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), TM(TM), RBI(RBI),
      STI(STI), Opcodes(STI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

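// Pick a concrete register class for a virtual register based only on its
// register bank and type size: the GPR bank always maps to GPRRegClass, while
// on the FPR bank 32/64/128-bit values map to SPR/DPR/QPR respectively.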
static const TargetRegisterClass *guessRegClass(unsigned Reg,
                                                MachineRegisterInfo &MRI,
                                                const TargetRegisterInfo &TRI,
                                                const RegisterBankInfo &RBI) {
  const RegisterBank *RegBank = RBI.getRegBank(Reg, MRI, TRI);
  assert(RegBank && "Can't get reg bank for virtual register");

  const unsigned Size = MRI.getType(Reg).getSizeInBits();
  assert((RegBank->getID() == ARM::GPRRegBankID ||
          RegBank->getID() == ARM::FPRRegBankID) &&
         "Unsupported reg bank");

  if (RegBank->getID() == ARM::FPRRegBankID) {
    if (Size == 32)
      return &ARM::SPRRegClass;
    else if (Size == 64)
      return &ARM::DPRRegClass;
    else if (Size == 128)
      return &ARM::QPRRegClass;
    else
      llvm_unreachable("Unsupported destination size");
  }

  return &ARM::GPRRegClass;
}

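// Constrain the destination of a COPY to the register class implied by its
// bank and size. Physical destinations are left alone; the source will be
// constrained when one of its other uses or defs is selected.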
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {
  Register DstReg = I.getOperand(0).getReg();
  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI);

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}

static bool selectMergeValues(MachineInstrBuilder &MIB,
                              const ARMBaseInstrInfo &TII,
                              MachineRegisterInfo &MRI,
                              const TargetRegisterInfo &TRI,
                              const RegisterBankInfo &RBI) {
  assert(TII.getSubtarget().hasVFP2Base() && "Can't select merge without VFP");

  // We only support G_MERGE_VALUES as a way to stick together two scalar GPRs
  // into one DPR.
  Register VReg0 = MIB.getReg(0);
  (void)VReg0;
  assert(MRI.getType(VReg0).getSizeInBits() == 64 &&
         RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::FPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");
  Register VReg1 = MIB.getReg(1);
  (void)VReg1;
  assert(MRI.getType(VReg1).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");
  Register VReg2 = MIB.getReg(2);
  (void)VReg2;
  assert(MRI.getType(VReg2).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");

  MIB->setDesc(TII.get(ARM::VMOVDRR));
  MIB.add(predOps(ARMCC::AL));

  return true;
}

static bool selectUnmergeValues(MachineInstrBuilder &MIB,
                                const ARMBaseInstrInfo &TII,
                                MachineRegisterInfo &MRI,
                                const TargetRegisterInfo &TRI,
                                const RegisterBankInfo &RBI) {
  assert(TII.getSubtarget().hasVFP2Base() &&
         "Can't select unmerge without VFP");

  // We only support G_UNMERGE_VALUES as a way to break up one DPR into two
  // GPRs.
  Register VReg0 = MIB.getReg(0);
  (void)VReg0;
  assert(MRI.getType(VReg0).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");
  Register VReg1 = MIB.getReg(1);
  (void)VReg1;
  assert(MRI.getType(VReg1).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");
  Register VReg2 = MIB.getReg(2);
  (void)VReg2;
  assert(MRI.getType(VReg2).getSizeInBits() == 64 &&
         RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::FPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");

  MIB->setDesc(TII.get(ARM::VMOVRRD));
  MIB.add(predOps(ARMCC::AL));

  return true;
}

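// Fill the opcode cache for this subtarget. The STORE_OPCODE macro below picks
// the Thumb2 or ARM-mode variant of an instruction; for example,
// STORE_OPCODE(SEXT16, SXTH) expands to
//   SEXT16 = isThumb ? ARM::t2SXTH : ARM::SXTH;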
ARMInstructionSelector::OpcodeCache::OpcodeCache(const ARMSubtarget &STI) {
  bool isThumb = STI.isThumb();

  using namespace TargetOpcode;

#define STORE_OPCODE(VAR, OPC) VAR = isThumb ? ARM::t2##OPC : ARM::OPC
  STORE_OPCODE(SEXT16, SXTH);
  STORE_OPCODE(ZEXT16, UXTH);

  STORE_OPCODE(SEXT8, SXTB);
  STORE_OPCODE(ZEXT8, UXTB);

  STORE_OPCODE(AND, ANDri);
  STORE_OPCODE(RSB, RSBri);

  STORE_OPCODE(STORE32, STRi12);
  STORE_OPCODE(LOAD32, LDRi12);

  // LDRH/STRH are special...
  STORE16 = isThumb ? ARM::t2STRHi12 : ARM::STRH;
  LOAD16 = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;

  STORE_OPCODE(STORE8, STRBi12);
  STORE_OPCODE(LOAD8, LDRBi12);

  STORE_OPCODE(ADDrr, ADDrr);
  STORE_OPCODE(ADDri, ADDri);

  STORE_OPCODE(CMPrr, CMPrr);
  STORE_OPCODE(MOVi, MOVi);
  STORE_OPCODE(MOVCCi, MOVCCi);

  STORE_OPCODE(MOVCCr, MOVCCr);

  STORE_OPCODE(TSTri, TSTri);
  STORE_OPCODE(Bcc, Bcc);

  STORE_OPCODE(MOVi32imm, MOVi32imm);
  ConstPoolLoad = isThumb ? ARM::t2LDRpci : ARM::LDRi12;
  STORE_OPCODE(MOV_ga_pcrel, MOV_ga_pcrel);
  LDRLIT_ga_pcrel = isThumb ? ARM::tLDRLIT_ga_pcrel : ARM::LDRLIT_ga_pcrel;
  LDRLIT_ga_abs = isThumb ? ARM::tLDRLIT_ga_abs : ARM::LDRLIT_ga_abs;
#undef STORE_OPCODE
}

unsigned ARMInstructionSelector::selectSimpleExtOpc(unsigned Opc,
                                                    unsigned Size) const {
  using namespace TargetOpcode;

  if (Size != 8 && Size != 16)
    return Opc;

  if (Opc == G_SEXT)
    return Size == 8 ? Opcodes.SEXT8 : Opcodes.SEXT16;

  if (Opc == G_ZEXT)
    return Size == 8 ? Opcodes.ZEXT8 : Opcodes.ZEXT16;

  return Opc;
}

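// Map G_LOAD/G_STORE to a concrete opcode based on the register bank and the
// access size: byte/halfword/word loads and stores on the GPR bank, and
// VLDRS/VSTRS or VLDRD/VSTRD on the FPR bank. Unsupported combinations return
// the generic opcode unchanged so the caller can bail out.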
unsigned ARMInstructionSelector::selectLoadStoreOpCode(unsigned Opc,
                                                       unsigned RegBank,
                                                       unsigned Size) const {
  bool isStore = Opc == TargetOpcode::G_STORE;

  if (RegBank == ARM::GPRRegBankID) {
    switch (Size) {
    case 1:
    case 8:
      return isStore ? Opcodes.STORE8 : Opcodes.LOAD8;
    case 16:
      return isStore ? Opcodes.STORE16 : Opcodes.LOAD16;
    case 32:
      return isStore ? Opcodes.STORE32 : Opcodes.LOAD32;
    default:
      return Opc;
    }
  }

  if (RegBank == ARM::FPRRegBankID) {
    switch (Size) {
    case 32:
      return isStore ? ARM::VSTRS : ARM::VLDRS;
    case 64:
      return isStore ? ARM::VSTRD : ARM::VLDRD;
    default:
      return Opc;
    }
  }

  return Opc;
}

// When lowering comparisons, we sometimes need to perform two compares instead
// of just one. Get the condition codes for both comparisons. If only one is
// needed, the second member of the pair is ARMCC::AL.
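// For example, there is no single ARM condition for FCMP_ONE (ordered and not
// equal), so it is lowered as (LHS > RHS) || (LHS < RHS), i.e. {GT, MI}, while
// ICMP_EQ only needs {EQ, AL}.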
static std::pair<ARMCC::CondCodes, ARMCC::CondCodes>
getComparePreds(CmpInst::Predicate Pred) {
  std::pair<ARMCC::CondCodes, ARMCC::CondCodes> Preds = {ARMCC::AL, ARMCC::AL};
  switch (Pred) {
  case CmpInst::FCMP_ONE:
    Preds = {ARMCC::GT, ARMCC::MI};
    break;
  case CmpInst::FCMP_UEQ:
    Preds = {ARMCC::EQ, ARMCC::VS};
    break;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    Preds.first = ARMCC::EQ;
    break;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    Preds.first = ARMCC::GT;
    break;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    Preds.first = ARMCC::GE;
    break;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    Preds.first = ARMCC::HI;
    break;
  case CmpInst::FCMP_OLT:
    Preds.first = ARMCC::MI;
    break;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    Preds.first = ARMCC::LS;
    break;
  case CmpInst::FCMP_ORD:
    Preds.first = ARMCC::VC;
    break;
  case CmpInst::FCMP_UNO:
    Preds.first = ARMCC::VS;
    break;
  case CmpInst::FCMP_UGE:
    Preds.first = ARMCC::PL;
    break;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    Preds.first = ARMCC::LT;
    break;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    Preds.first = ARMCC::LE;
    break;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    Preds.first = ARMCC::NE;
    break;
  case CmpInst::ICMP_UGE:
    Preds.first = ARMCC::HS;
    break;
  case CmpInst::ICMP_ULT:
    Preds.first = ARMCC::LO;
    break;
  default:
    break;
  }
  assert(Preds.first != ARMCC::AL && "No comparisons needed?");
  return Preds;
}

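// Bundles the opcodes and operand constraints that selectCmp() needs: G_ICMP
// uses CMPrr with no flags-read instruction and 32-bit GPR operands, while
// G_FCMP uses VCMPS/VCMPD followed by FMSTAT on 32- or 64-bit FPR operands
// (see the callers in select()).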
struct ARMInstructionSelector::CmpConstants {
  CmpConstants(unsigned CmpOpcode, unsigned FlagsOpcode, unsigned SelectOpcode,
               unsigned OpRegBank, unsigned OpSize)
      : ComparisonOpcode(CmpOpcode), ReadFlagsOpcode(FlagsOpcode),
        SelectResultOpcode(SelectOpcode), OperandRegBankID(OpRegBank),
        OperandSize(OpSize) {}

  // The opcode used for performing the comparison.
  const unsigned ComparisonOpcode;

  // The opcode used for reading the flags set by the comparison. May be
  // ARM::INSTRUCTION_LIST_END if we don't need to read the flags.
  const unsigned ReadFlagsOpcode;

  // The opcode used for materializing the result of the comparison.
  const unsigned SelectResultOpcode;

  // The assumed register bank ID for the operands.
  const unsigned OperandRegBankID;

  // The assumed size in bits for the operands.
  const unsigned OperandSize;
};

struct ARMInstructionSelector::InsertInfo {
  InsertInfo(MachineInstrBuilder &MIB)
      : MBB(*MIB->getParent()), InsertBefore(std::next(MIB->getIterator())),
        DbgLoc(MIB->getDebugLoc()) {}

  MachineBasicBlock &MBB;
  const MachineBasicBlock::instr_iterator InsertBefore;
  const DebugLoc &DbgLoc;
};

void ARMInstructionSelector::putConstant(InsertInfo I, unsigned DestReg,
                                         unsigned Constant) const {
  (void)BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(Opcodes.MOVi))
      .addDef(DestReg)
      .addImm(Constant)
      .add(predOps(ARMCC::AL))
      .add(condCodeOp());
}

bool ARMInstructionSelector::validOpRegPair(MachineRegisterInfo &MRI,
                                            unsigned LHSReg, unsigned RHSReg,
                                            unsigned ExpectedSize,
                                            unsigned ExpectedRegBankID) const {
  return MRI.getType(LHSReg) == MRI.getType(RHSReg) &&
         validReg(MRI, LHSReg, ExpectedSize, ExpectedRegBankID) &&
         validReg(MRI, RHSReg, ExpectedSize, ExpectedRegBankID);
}

bool ARMInstructionSelector::validReg(MachineRegisterInfo &MRI, unsigned Reg,
                                      unsigned ExpectedSize,
                                      unsigned ExpectedRegBankID) const {
  if (MRI.getType(Reg).getSizeInBits() != ExpectedSize) {
    LLVM_DEBUG(dbgs() << "Unexpected size for register");
    return false;
  }

  if (RBI.getRegBank(Reg, MRI, TRI)->getID() != ExpectedRegBankID) {
    LLVM_DEBUG(dbgs() << "Unexpected register bank for register");
    return false;
  }

  return true;
}

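// Lower G_ICMP/G_FCMP into a compare (plus a flags read for VFP compares) and
// one or two predicated moves that materialize the boolean result.
// FCMP_TRUE/FCMP_FALSE degenerate into materializing the constant 1 or 0.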
bool ARMInstructionSelector::selectCmp(CmpConstants Helper,
                                       MachineInstrBuilder &MIB,
                                       MachineRegisterInfo &MRI) const {
  const InsertInfo I(MIB);

  auto ResReg = MIB.getReg(0);
  if (!validReg(MRI, ResReg, 1, ARM::GPRRegBankID))
    return false;

  auto Cond =
      static_cast<CmpInst::Predicate>(MIB->getOperand(1).getPredicate());
  if (Cond == CmpInst::FCMP_TRUE || Cond == CmpInst::FCMP_FALSE) {
    putConstant(I, ResReg, Cond == CmpInst::FCMP_TRUE ? 1 : 0);
    MIB->eraseFromParent();
    return true;
  }

  auto LHSReg = MIB.getReg(2);
  auto RHSReg = MIB.getReg(3);
  if (!validOpRegPair(MRI, LHSReg, RHSReg, Helper.OperandSize,
                      Helper.OperandRegBankID))
    return false;

  auto ARMConds = getComparePreds(Cond);
  auto ZeroReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
  putConstant(I, ZeroReg, 0);

  if (ARMConds.second == ARMCC::AL) {
    // Simple case, we only need one comparison and we're done.
    if (!insertComparison(Helper, I, ResReg, ARMConds.first, LHSReg, RHSReg,
                          ZeroReg))
      return false;
  } else {
    // Not so simple, we need two successive comparisons.
    auto IntermediateRes = MRI.createVirtualRegister(&ARM::GPRRegClass);
    if (!insertComparison(Helper, I, IntermediateRes, ARMConds.first, LHSReg,
                          RHSReg, ZeroReg))
      return false;
    if (!insertComparison(Helper, I, ResReg, ARMConds.second, LHSReg, RHSReg,
                          IntermediateRes))
      return false;
  }

  MIB->eraseFromParent();
  return true;
}

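// Emit one step of the OR-chain described in the class declaration, roughly:
//   <ComparisonOpcode> LHSReg, RHSReg        ; compare the operands
//   <ReadFlagsOpcode>                        ; e.g. FMSTAT, VFP compares only
//   <SelectResultOpcode> ResReg, PrevRes, 1  ; predicated on Cond, i.e.
//                                            ; ResReg = Cond ? 1 : PrevRes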
bool ARMInstructionSelector::insertComparison(CmpConstants Helper, InsertInfo I,
                                              unsigned ResReg,
                                              ARMCC::CondCodes Cond,
                                              unsigned LHSReg, unsigned RHSReg,
                                              unsigned PrevRes) const {
  // Perform the comparison.
  auto CmpI =
      BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(Helper.ComparisonOpcode))
          .addUse(LHSReg)
          .addUse(RHSReg)
          .add(predOps(ARMCC::AL));
  if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
    return false;

  // Read the comparison flags (if necessary).
  if (Helper.ReadFlagsOpcode != ARM::INSTRUCTION_LIST_END) {
    auto ReadI = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc,
                         TII.get(Helper.ReadFlagsOpcode))
                     .add(predOps(ARMCC::AL));
    if (!constrainSelectedInstRegOperands(*ReadI, TII, TRI, RBI))
      return false;
  }

  // Select either 1 or the previous result based on the value of the flags.
  auto Mov1I = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc,
                       TII.get(Helper.SelectResultOpcode))
                   .addDef(ResReg)
                   .addUse(PrevRes)
                   .addImm(1)
                   .add(predOps(Cond, ARM::CPSR));
  if (!constrainSelectedInstRegOperands(*Mov1I, TII, TRI, RBI))
    return false;

  return true;
}

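// Lower G_GLOBAL_VALUE. For PIC this becomes a pc-relative address computation
// with an optional GOT load; for ROPI/RWPI a pc-relative or SB-relative
// address; otherwise an absolute address materialized with MOVi32imm, a
// constant pool load (ELF) or a literal pool load (MachO).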
bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB,
                                          MachineRegisterInfo &MRI) const {
  if ((STI.isROPI() || STI.isRWPI()) && !STI.isTargetELF()) {
    LLVM_DEBUG(dbgs() << "ROPI and RWPI only supported for ELF\n");
    return false;
  }

  auto GV = MIB->getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    LLVM_DEBUG(dbgs() << "TLS variables not supported yet\n");
    return false;
  }

  auto &MBB = *MIB->getParent();
  auto &MF = *MBB.getParent();

  bool UseMovt = STI.useMovt();

  LLT PtrTy = MRI.getType(MIB->getOperand(0).getReg());
  const Align Alignment(4);

  auto addOpsForConstantPoolLoad = [&MF, Alignment, PtrTy](
                                       MachineInstrBuilder &MIB,
                                       const GlobalValue *GV, bool IsSBREL) {
    assert((MIB->getOpcode() == ARM::LDRi12 ||
            MIB->getOpcode() == ARM::t2LDRpci) &&
           "Unsupported instruction");
    auto ConstPool = MF.getConstantPool();
    auto CPIndex =
        // For SB relative entries we need a target-specific constant pool.
        // Otherwise, just use a regular constant pool entry.
        IsSBREL
            ? ConstPool->getConstantPoolIndex(
                  ARMConstantPoolConstant::Create(GV, ARMCP::SBREL), Alignment)
            : ConstPool->getConstantPoolIndex(GV, Alignment);
    MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
        .addMemOperand(MF.getMachineMemOperand(
            MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
            PtrTy, Alignment));
    if (MIB->getOpcode() == ARM::LDRi12)
      MIB.addImm(0);
    MIB.add(predOps(ARMCC::AL));
  };

  auto addGOTMemOperand = [this, &MF, Alignment](MachineInstrBuilder &MIB) {
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad,
        TM.getProgramPointerSize(), Alignment));
  };

  if (TM.isPositionIndependent()) {
    bool Indirect = STI.isGVIndirectSymbol(GV);

    // For ARM mode, we have different pseudoinstructions for direct accesses
    // and indirect accesses, and the ones for indirect accesses include the
    // load from GOT. For Thumb mode, we use the same pseudoinstruction for both
    // direct and indirect accesses, and we need to manually generate the load
    // from GOT.
    bool UseOpcodeThatLoads = Indirect && !STI.isThumb();

    // FIXME: Taking advantage of MOVT for ELF is pretty involved, so we don't
    // support it yet. See PR28229.
    unsigned Opc =
        UseMovt && !STI.isTargetELF()
            ? (UseOpcodeThatLoads ? (unsigned)ARM::MOV_ga_pcrel_ldr
                                  : Opcodes.MOV_ga_pcrel)
            : (UseOpcodeThatLoads ? (unsigned)ARM::LDRLIT_ga_pcrel_ldr
                                  : Opcodes.LDRLIT_ga_pcrel);
    MIB->setDesc(TII.get(Opc));

    int TargetFlags = ARMII::MO_NO_FLAG;
    if (STI.isTargetDarwin())
      TargetFlags |= ARMII::MO_NONLAZY;
    if (STI.isGVInGOT(GV))
      TargetFlags |= ARMII::MO_GOT;
    MIB->getOperand(1).setTargetFlags(TargetFlags);

    if (Indirect) {
      if (!UseOpcodeThatLoads) {
        auto ResultReg = MIB.getReg(0);
        auto AddressReg = MRI.createVirtualRegister(&ARM::GPRRegClass);

        MIB->getOperand(0).setReg(AddressReg);

        auto InsertBefore = std::next(MIB->getIterator());
        auto MIBLoad = BuildMI(MBB, InsertBefore, MIB->getDebugLoc(),
                               TII.get(Opcodes.LOAD32))
                           .addDef(ResultReg)
                           .addReg(AddressReg)
                           .addImm(0)
                           .add(predOps(ARMCC::AL));
        addGOTMemOperand(MIBLoad);

        if (!constrainSelectedInstRegOperands(*MIBLoad, TII, TRI, RBI))
          return false;
      } else {
        addGOTMemOperand(MIB);
      }
    }

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  bool isReadOnly = STI.getTargetLowering()->isReadOnly(GV);
  if (STI.isROPI() && isReadOnly) {
    unsigned Opc = UseMovt ? Opcodes.MOV_ga_pcrel : Opcodes.LDRLIT_ga_pcrel;
    MIB->setDesc(TII.get(Opc));
    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }
  if (STI.isRWPI() && !isReadOnly) {
    auto Offset = MRI.createVirtualRegister(&ARM::GPRRegClass);
    MachineInstrBuilder OffsetMIB;
    if (UseMovt) {
      OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(),
                          TII.get(Opcodes.MOVi32imm), Offset);
      OffsetMIB.addGlobalAddress(GV, /*Offset*/ 0, ARMII::MO_SBREL);
    } else {
      // Load the offset from the constant pool.
      OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(),
                          TII.get(Opcodes.ConstPoolLoad), Offset);
      addOpsForConstantPoolLoad(OffsetMIB, GV, /*IsSBREL*/ true);
    }
    if (!constrainSelectedInstRegOperands(*OffsetMIB, TII, TRI, RBI))
      return false;

    // Add the offset to the SB register.
    MIB->setDesc(TII.get(Opcodes.ADDrr));
    MIB->removeOperand(1);
    MIB.addReg(ARM::R9) // FIXME: don't hardcode R9
        .addReg(Offset)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  if (STI.isTargetELF()) {
    if (UseMovt) {
      MIB->setDesc(TII.get(Opcodes.MOVi32imm));
    } else {
      // Load the global's address from the constant pool.
      MIB->setDesc(TII.get(Opcodes.ConstPoolLoad));
      MIB->removeOperand(1);
      addOpsForConstantPoolLoad(MIB, GV, /*IsSBREL*/ false);
    }
  } else if (STI.isTargetMachO()) {
    if (UseMovt)
      MIB->setDesc(TII.get(Opcodes.MOVi32imm));
    else
      MIB->setDesc(TII.get(Opcodes.LDRLIT_ga_abs));
  } else {
    LLVM_DEBUG(dbgs() << "Object format not supported yet\n");
    return false;
  }

  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

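// Lower G_SELECT by testing bit 0 of the condition register and then using a
// predicated register move to pick between the true and false values.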
bool ARMInstructionSelector::selectSelect(MachineInstrBuilder &MIB,
                                          MachineRegisterInfo &MRI) const {
  auto &MBB = *MIB->getParent();
  auto InsertBefore = std::next(MIB->getIterator());
  auto &DbgLoc = MIB->getDebugLoc();

  // Compare the condition to 1.
  auto CondReg = MIB.getReg(1);
  assert(validReg(MRI, CondReg, 1, ARM::GPRRegBankID) &&
         "Unsupported types for select operation");
  auto CmpI = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(Opcodes.TSTri))
                  .addUse(CondReg)
                  .addImm(1)
                  .add(predOps(ARMCC::AL));
  if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
    return false;

  // Move a value into the result register based on the result of the
  // comparison.
  auto ResReg = MIB.getReg(0);
  auto TrueReg = MIB.getReg(2);
  auto FalseReg = MIB.getReg(3);
  assert(validOpRegPair(MRI, ResReg, TrueReg, 32, ARM::GPRRegBankID) &&
         validOpRegPair(MRI, TrueReg, FalseReg, 32, ARM::GPRRegBankID) &&
         "Unsupported types for select operation");
  auto Mov1I = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(Opcodes.MOVCCr))
                   .addDef(ResReg)
                   .addUse(TrueReg)
                   .addUse(FalseReg)
                   .add(predOps(ARMCC::EQ, ARM::CPSR));
  if (!constrainSelectedInstRegOperands(*Mov1I, TII, TRI, RBI))
    return false;

  MIB->eraseFromParent();
  return true;
}

bool ARMInstructionSelector::selectShift(unsigned ShiftOpc,
                                         MachineInstrBuilder &MIB) const {
  assert(!STI.isThumb() && "Unsupported subtarget");
  MIB->setDesc(TII.get(ARM::MOVsr));
  MIB.addImm(ShiftOpc);
  MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

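// Operand renderers: renderVFPF32Imm/renderVFPF64Imm re-encode a G_FCONSTANT
// as the 8-bit VFP modified-immediate form computed by ARM_AM::getFP32Imm and
// ARM_AM::getFP64Imm, and renderInvertedImm emits the bitwise complement of a
// G_CONSTANT's value.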
void ARMInstructionSelector::renderVFPF32Imm(
    MachineInstrBuilder &NewInstBuilder, const MachineInstr &OldInst,
    int OpIdx) const {
  assert(OldInst.getOpcode() == TargetOpcode::G_FCONSTANT &&
         OpIdx == -1 && "Expected G_FCONSTANT");

  APFloat FPImmValue = OldInst.getOperand(1).getFPImm()->getValueAPF();
  int FPImmEncoding = ARM_AM::getFP32Imm(FPImmValue);
  assert(FPImmEncoding != -1 && "Invalid immediate value");

  NewInstBuilder.addImm(FPImmEncoding);
}

void ARMInstructionSelector::renderVFPF64Imm(
    MachineInstrBuilder &NewInstBuilder, const MachineInstr &OldInst,
    int OpIdx) const {
  assert(OldInst.getOpcode() == TargetOpcode::G_FCONSTANT &&
         OpIdx == -1 && "Expected G_FCONSTANT");

  APFloat FPImmValue = OldInst.getOperand(1).getFPImm()->getValueAPF();
  int FPImmEncoding = ARM_AM::getFP64Imm(FPImmValue);
  assert(FPImmEncoding != -1 && "Invalid immediate value");

  NewInstBuilder.addImm(FPImmEncoding);
}

void ARMInstructionSelector::renderInvertedImm(MachineInstrBuilder &MIB,
                                               const MachineInstr &MI,
                                               int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(~CVal);
}

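// Main entry point: generic instructions are first handed to the
// TableGen'erated matcher (selectImpl); anything it rejects is handled by the
// manual selection code below.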
bool ARMInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  auto &MBB = *I.getParent();
  auto &MF = *MBB.getParent();
  auto &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }

  using namespace TargetOpcode;

  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineInstrBuilder MIB{MF, I};
  bool isSExt = false;

  switch (I.getOpcode()) {
  case G_SEXT:
    isSExt = true;
    [[fallthrough]];
  case G_ZEXT: {
    assert(MRI.getType(I.getOperand(0).getReg()).getSizeInBits() <= 32 &&
           "Unsupported destination size for extension");

    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    unsigned SrcSize = SrcTy.getSizeInBits();
    switch (SrcSize) {
    case 1: {
      // ZExt boils down to & 0x1; for SExt we also subtract that from 0
      I.setDesc(TII.get(Opcodes.AND));
      MIB.addImm(1).add(predOps(ARMCC::AL)).add(condCodeOp());

      if (isSExt) {
        Register SExtResult = I.getOperand(0).getReg();

        // Use a new virtual register for the result of the AND
        Register AndResult = MRI.createVirtualRegister(&ARM::GPRRegClass);
        I.getOperand(0).setReg(AndResult);

        auto InsertBefore = std::next(I.getIterator());
        auto SubI =
            BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(Opcodes.RSB))
                .addDef(SExtResult)
                .addUse(AndResult)
                .addImm(0)
                .add(predOps(ARMCC::AL))
                .add(condCodeOp());
        if (!constrainSelectedInstRegOperands(*SubI, TII, TRI, RBI))
          return false;
      }
      break;
    }
    case 8:
    case 16: {
      unsigned NewOpc = selectSimpleExtOpc(I.getOpcode(), SrcSize);
      if (NewOpc == I.getOpcode())
        return false;
      I.setDesc(TII.get(NewOpc));
      MIB.addImm(0).add(predOps(ARMCC::AL));
      break;
    }
    default:
      LLVM_DEBUG(dbgs() << "Unsupported source size for extension");
      return false;
    }
    break;
  }
  case G_ANYEXT:
  case G_TRUNC: {
    // The high bits are undefined, so there's nothing special to do, just
    // treat it as a copy.
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() == ARM::FPRRegBankID) {
      // This should only happen in the obscure case where we have put a 64-bit
      // integer into a D register. Get it out of there and keep only the
      // interesting part.
      assert(I.getOpcode() == G_TRUNC && "Unsupported operand for G_ANYEXT");
      assert(DstRegBank.getID() == ARM::GPRRegBankID &&
             "Unsupported combination of register banks");
      assert(MRI.getType(SrcReg).getSizeInBits() == 64 && "Unsupported size");
      assert(MRI.getType(DstReg).getSizeInBits() <= 32 && "Unsupported size");

      Register IgnoredBits = MRI.createVirtualRegister(&ARM::GPRRegClass);
      auto InsertBefore = std::next(I.getIterator());
      auto MovI =
          BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(ARM::VMOVRRD))
              .addDef(DstReg)
              .addDef(IgnoredBits)
              .addUse(SrcReg)
              .add(predOps(ARMCC::AL));
      if (!constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI))
        return false;

      MIB->eraseFromParent();
      return true;
    }

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      LLVM_DEBUG(
          dbgs() << "G_TRUNC/G_ANYEXT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_TRUNC/G_ANYEXT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_CONSTANT: {
    if (!MRI.getType(I.getOperand(0).getReg()).isPointer()) {
      // Non-pointer constants should be handled by TableGen.
      LLVM_DEBUG(dbgs() << "Unsupported constant type\n");
      return false;
    }

    auto &Val = I.getOperand(1);
    if (Val.isCImm()) {
      if (!Val.getCImm()->isZero()) {
        LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
      Val.ChangeToImmediate(0);
    } else {
      assert(Val.isImm() && "Unexpected operand for G_CONSTANT");
      if (Val.getImm() != 0) {
        LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
    }

    assert(!STI.isThumb() && "Unsupported subtarget");
    I.setDesc(TII.get(ARM::MOVi));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  }
  case G_FCONSTANT: {
    // Load from constant pool
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits() / 8;
    Align Alignment(Size);

    assert((Size == 4 || Size == 8) && "Unsupported FP constant type");
    auto LoadOpcode = Size == 4 ? ARM::VLDRS : ARM::VLDRD;

    auto ConstPool = MF.getConstantPool();
    auto CPIndex =
        ConstPool->getConstantPoolIndex(I.getOperand(1).getFPImm(), Alignment);
    MIB->setDesc(TII.get(LoadOpcode));
    MIB->removeOperand(1);
    MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
        .addMemOperand(
            MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF),
                                    MachineMemOperand::MOLoad, Size, Alignment))
        .addImm(0)
        .add(predOps(ARMCC::AL));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      LLVM_DEBUG(
          dbgs()
          << "G_INTTOPTR/G_PTRTOINT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      LLVM_DEBUG(
          dbgs() << "G_INTTOPTR/G_PTRTOINT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_SELECT:
    return selectSelect(MIB, MRI);
  case G_ICMP: {
    CmpConstants Helper(Opcodes.CMPrr, ARM::INSTRUCTION_LIST_END,
                        Opcodes.MOVCCi, ARM::GPRRegBankID, 32);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_FCMP: {
    assert(STI.hasVFP2Base() && "Can't select fcmp without VFP");

    Register OpReg = I.getOperand(2).getReg();
    unsigned Size = MRI.getType(OpReg).getSizeInBits();

    if (Size == 64 && !STI.hasFP64()) {
      LLVM_DEBUG(dbgs() << "Subtarget only supports single precision");
      return false;
    }
    if (Size != 32 && Size != 64) {
      LLVM_DEBUG(dbgs() << "Unsupported size for G_FCMP operand");
      return false;
    }

    CmpConstants Helper(Size == 32 ? ARM::VCMPS : ARM::VCMPD, ARM::FMSTAT,
                        Opcodes.MOVCCi, ARM::FPRRegBankID, Size);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_LSHR:
    return selectShift(ARM_AM::ShiftOpc::lsr, MIB);
  case G_ASHR:
    return selectShift(ARM_AM::ShiftOpc::asr, MIB);
  case G_SHL: {
    return selectShift(ARM_AM::ShiftOpc::lsl, MIB);
  }
  case G_PTR_ADD:
    I.setDesc(TII.get(Opcodes.ADDrr));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_FRAME_INDEX:
    // Add 0 to the given frame index and hope it will eventually be folded into
    // the user(s).
    I.setDesc(TII.get(Opcodes.ADDri));
    MIB.addImm(0).add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_GLOBAL_VALUE:
    return selectGlobal(MIB, MRI);
  case G_STORE:
  case G_LOAD: {
    const auto &MemOp = **I.memoperands_begin();
    if (MemOp.isAtomic()) {
      LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }

    Register Reg = I.getOperand(0).getReg();
    unsigned RegBank = RBI.getRegBank(Reg, MRI, TRI)->getID();

    LLT ValTy = MRI.getType(Reg);
    const auto ValSize = ValTy.getSizeInBits();

    assert((ValSize != 64 || STI.hasVFP2Base()) &&
           "Don't know how to load/store 64-bit value without VFP");

    const auto NewOpc = selectLoadStoreOpCode(I.getOpcode(), RegBank, ValSize);
    if (NewOpc == G_LOAD || NewOpc == G_STORE)
      return false;

    I.setDesc(TII.get(NewOpc));

    if (NewOpc == ARM::LDRH || NewOpc == ARM::STRH)
      // LDRH has a funny addressing mode (there's already a FIXME for it).
      MIB.addReg(0);
    MIB.addImm(0).add(predOps(ARMCC::AL));
    break;
  }
  case G_MERGE_VALUES: {
    if (!selectMergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_UNMERGE_VALUES: {
    if (!selectUnmergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_BRCOND: {
    if (!validReg(MRI, I.getOperand(0).getReg(), 1, ARM::GPRRegBankID)) {
      LLVM_DEBUG(dbgs() << "Unsupported condition register for G_BRCOND");
      return false;
    }

    // Set the flags.
    auto Test =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcodes.TSTri))
            .addReg(I.getOperand(0).getReg())
            .addImm(1)
            .add(predOps(ARMCC::AL));
    if (!constrainSelectedInstRegOperands(*Test, TII, TRI, RBI))
      return false;

    // Branch conditionally.
    auto Branch =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcodes.Bcc))
            .add(I.getOperand(1))
            .add(predOps(ARMCC::NE, ARM::CPSR));
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;
    I.eraseFromParent();
    return true;
  }
  case G_PHI: {
    I.setDesc(TII.get(PHI));

    Register DstReg = I.getOperand(0).getReg();
    const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI);
    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      break;
    }

    return true;
  }
  default:
    return false;
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}