GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
//===-- AArch64AdvSIMDScalarPass.cpp - Use AdvSIMD scalar instructions ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// When profitable, replace GPR targeting i64 instructions with their
// AdvSIMD scalar equivalents. Generally speaking, "profitable" is defined
// as minimizing the number of cross-class register copies.
//===----------------------------------------------------------------------===//
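
// For example (illustrative sketch, not part of the original file), given a
// GPR64 add whose operands arrive through cross-class copies:
//
//   %g0:gpr64 = COPY %a:fpr64       ; cross-class copy
//   %g1:gpr64 = COPY %b:fpr64       ; cross-class copy
//   %sum:gpr64 = ADDXrr %g0, %g1
//
// the pass prefers to emit
//
//   %sum:fpr64 = ADDv1i64 %a, %b
//
// keeping the value in the FPR/SIMD register file and making both copies
// removable.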

//===----------------------------------------------------------------------===//
// TODO: Graph based predicate heuristics.
// Walking the instruction list linearly will get many, perhaps most, of
// the cases, but to do a truly thorough job of this, we need a more
// holistic approach.
//
// This optimization is very similar in spirit to the register allocator's
// spill placement, only here we're determining where to place cross-class
// register copies rather than spills. As such, a similar approach is
// called for.
//
// We want to build up a set of graphs of all instructions which are candidates
// for transformation along with instructions which generate their inputs and
// consume their outputs. For each edge in the graph, we assign a weight
// based on whether there is a copy required there (weight zero if not) and
// the block frequency of the block containing the defining or using
// instruction, whichever is less. Our optimization is then a graph problem
// to minimize the total weight of all the graphs, then transform instructions
// and add or remove copy instructions as called for to implement the
// solution.
//===----------------------------------------------------------------------===//
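//
// A rough sketch of the edge weight that TODO describes (illustrative only;
// block frequency information is not queried by the current implementation):
//
//   weight(def, use) = copyNeeded(def, use)
//                        ? min(freq(parentBB(def)), freq(parentBB(use)))
//                        : 0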

#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "AArch64RegisterInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "aarch64-simd-scalar"

// Allow forcing all i64 operations with equivalent SIMD instructions to use
// them. For stress-testing the transformation function.
static cl::opt<bool>
TransformAll("aarch64-simd-scalar-force-all",
             cl::desc("Force use of AdvSIMD scalar instructions everywhere"),
             cl::init(false), cl::Hidden);
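// (Being cl::Hidden, the flag is accepted but not listed in --help; passing
// e.g. "llc -aarch64-simd-scalar-force-all" should exercise the transform on
// every eligible instruction.)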

STATISTIC(NumScalarInsnsUsed, "Number of scalar instructions used");
STATISTIC(NumCopiesDeleted, "Number of cross-class copies deleted");
STATISTIC(NumCopiesInserted, "Number of cross-class copies inserted");

#define AARCH64_ADVSIMD_NAME "AdvSIMD Scalar Operation Optimization"

namespace {
class AArch64AdvSIMDScalar : public MachineFunctionPass {
  MachineRegisterInfo *MRI;
  const TargetInstrInfo *TII;

private:
  // isProfitableToTransform - Predicate function to determine whether an
  // instruction should be transformed to its equivalent AdvSIMD scalar
  // instruction. "add Xd, Xn, Xm" ==> "add Dd, Da, Db", for example.
  bool isProfitableToTransform(const MachineInstr &MI) const;

  // transformInstruction - Perform the transformation of an instruction
  // to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
  // to be the correct register class, minimizing cross-class copies.
  void transformInstruction(MachineInstr &MI);

  // processMachineBasicBlock - Main optimization loop.
  bool processMachineBasicBlock(MachineBasicBlock *MBB);

public:
  static char ID; // Pass identification, replacement for typeid.
  explicit AArch64AdvSIMDScalar() : MachineFunctionPass(ID) {
    initializeAArch64AdvSIMDScalarPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &F) override;

  StringRef getPassName() const override { return AARCH64_ADVSIMD_NAME; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};
char AArch64AdvSIMDScalar::ID = 0;
} // end anonymous namespace

INITIALIZE_PASS(AArch64AdvSIMDScalar, "aarch64-simd-scalar",
                AARCH64_ADVSIMD_NAME, false, false)

static bool isGPR64(unsigned Reg, unsigned SubReg,
                    const MachineRegisterInfo *MRI) {
  if (SubReg)
    return false;
  if (Register::isVirtualRegister(Reg))
    return MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::GPR64RegClass);
  return AArch64::GPR64RegClass.contains(Reg);
}

static bool isFPR64(unsigned Reg, unsigned SubReg,
                    const MachineRegisterInfo *MRI) {
  if (Register::isVirtualRegister(Reg))
    return (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR64RegClass) &&
            SubReg == 0) ||
           (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR128RegClass) &&
            SubReg == AArch64::dsub);
  // Physical register references just check the register class directly.
  return (AArch64::FPR64RegClass.contains(Reg) && SubReg == 0) ||
         (AArch64::FPR128RegClass.contains(Reg) && SubReg == AArch64::dsub);
}

// getSrcFromCopy - Get the original source register for a GPR64 <--> FPR64
// copy instruction. Return nullptr if the instruction is not a copy.
static MachineOperand *getSrcFromCopy(MachineInstr *MI,
                                      const MachineRegisterInfo *MRI,
                                      unsigned &SubReg) {
  SubReg = 0;
  // The "FMOV Xd, Dn" instruction is the typical form.
  if (MI->getOpcode() == AArch64::FMOVDXr ||
      MI->getOpcode() == AArch64::FMOVXDr)
    return &MI->getOperand(1);
  // A lane zero extract "UMOV.d Xd, Vn[0]" is equivalent. We shouldn't see
  // these at this stage, but it's easy to check for.
  if (MI->getOpcode() == AArch64::UMOVvi64 && MI->getOperand(2).getImm() == 0) {
    SubReg = AArch64::dsub;
    return &MI->getOperand(1);
  }
  // Or just a plain COPY instruction. This can be directly to/from FPR64,
  // or it can be a dsub subreg reference to an FPR128.
  if (MI->getOpcode() == AArch64::COPY) {
    if (isFPR64(MI->getOperand(0).getReg(), MI->getOperand(0).getSubReg(),
                MRI) &&
        isGPR64(MI->getOperand(1).getReg(), MI->getOperand(1).getSubReg(), MRI))
      return &MI->getOperand(1);
    if (isGPR64(MI->getOperand(0).getReg(), MI->getOperand(0).getSubReg(),
                MRI) &&
        isFPR64(MI->getOperand(1).getReg(), MI->getOperand(1).getSubReg(),
                MRI)) {
      SubReg = MI->getOperand(1).getSubReg();
      return &MI->getOperand(1);
    }
  }

  // Otherwise, this is some other kind of instruction.
  return nullptr;
}

// getTransformOpcode - For any opcode for which there is an AdvSIMD equivalent
// that we're considering transforming to, return that AdvSIMD opcode. For all
// others, return the original opcode.
static unsigned getTransformOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    break;
  // FIXME: Lots more possibilities.
  case AArch64::ADDXrr:
    return AArch64::ADDv1i64;
  case AArch64::SUBXrr:
    return AArch64::SUBv1i64;
  case AArch64::ANDXrr:
    return AArch64::ANDv8i8;
  case AArch64::EORXrr:
    return AArch64::EORv8i8;
  case AArch64::ORRXrr:
    return AArch64::ORRv8i8;
  }
  // No AdvSIMD equivalent, so just return the original opcode.
  return Opc;
}

static bool isTransformable(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  return Opc != getTransformOpcode(Opc);
}

// isProfitableToTransform - Predicate function to determine whether an
// instruction should be transformed to its equivalent AdvSIMD scalar
// instruction. "add Xd, Xn, Xm" ==> "add Dd, Da, Db", for example.
bool AArch64AdvSIMDScalar::isProfitableToTransform(
    const MachineInstr &MI) const {
  // If this instruction isn't eligible to be transformed (no SIMD equivalent),
  // early exit since that's the common case.
  if (!isTransformable(MI))
    return false;

  // Count the number of copies we'll need to add and approximate the number
  // of copies that a transform will enable us to remove.
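  // Worst case is a copy for each of the two source operands plus one copy of
  // the result back out to GPR, hence three; the checks below subtract the
  // copies a transform would make unnecessary.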
  unsigned NumNewCopies = 3;
  unsigned NumRemovableCopies = 0;

  Register OrigSrc0 = MI.getOperand(1).getReg();
  Register OrigSrc1 = MI.getOperand(2).getReg();
  unsigned SubReg0;
  unsigned SubReg1;
  if (!MRI->def_empty(OrigSrc0)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc0);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    MachineOperand *MOSrc0 = getSrcFromCopy(&*Def, MRI, SubReg0);
    // If the source was from a copy, we don't need to insert a new copy.
    if (MOSrc0)
      --NumNewCopies;
    // If there are no other users of the original source, we can delete
    // that instruction.
    if (MOSrc0 && MRI->hasOneNonDBGUse(OrigSrc0))
      ++NumRemovableCopies;
  }
  if (!MRI->def_empty(OrigSrc1)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc1);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    MachineOperand *MOSrc1 = getSrcFromCopy(&*Def, MRI, SubReg1);
    if (MOSrc1)
      --NumNewCopies;
    // If there are no other users of the original source, we can delete
    // that instruction.
    if (MOSrc1 && MRI->hasOneNonDBGUse(OrigSrc1))
      ++NumRemovableCopies;
  }

  // If any of the uses of the original instructions is a cross class copy,
  // that's a copy that will be removable if we transform. Likewise, if
  // any of the uses is a transformable instruction, it's likely the transforms
  // will chain, enabling us to save a copy there, too. This is an aggressive
  // heuristic that approximates the graph based cost analysis described above.
  Register Dst = MI.getOperand(0).getReg();
  bool AllUsesAreCopies = true;
  for (MachineRegisterInfo::use_instr_nodbg_iterator
           Use = MRI->use_instr_nodbg_begin(Dst),
           E = MRI->use_instr_nodbg_end();
       Use != E; ++Use) {
    unsigned SubReg;
    if (getSrcFromCopy(&*Use, MRI, SubReg) || isTransformable(*Use))
      ++NumRemovableCopies;
    // If the use is an INSERT_SUBREG, that's still something that can
    // directly use the FPR64, so we don't invalidate AllUsesAreCopies. It's
    // preferable to have it use the FPR64 in most cases, as if the source
    // vector is an IMPLICIT_DEF, the INSERT_SUBREG just goes away entirely.
    // Ditto for a lane insert.
    else if (Use->getOpcode() == AArch64::INSERT_SUBREG ||
             Use->getOpcode() == AArch64::INSvi64gpr)
      ;
    else
      AllUsesAreCopies = false;
  }
  // If all of the uses of the original destination register are copies to
  // FPR64, then we won't end up having a new copy back to GPR64 either.
  if (AllUsesAreCopies)
    --NumNewCopies;

  // If a transform will not increase the number of cross-class copies required,
  // return true.
  if (NumNewCopies <= NumRemovableCopies)
    return true;

  // Finally, even if we otherwise wouldn't transform, check if we're forcing
  // transformation of everything.
  return TransformAll;
}
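
// Worked example of the accounting above (illustrative, not from the source):
// if both sources of an ADDXrr come from FMOVs that have no other users and
// the sole use of the result feeds another cross-class copy, then
// NumNewCopies = 3 - 1 - 1 - 1 = 0 and NumRemovableCopies = 3, so the
// transform is considered profitable.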

static MachineInstr *insertCopy(const TargetInstrInfo *TII, MachineInstr &MI,
                                unsigned Dst, unsigned Src, bool IsKill) {
  MachineInstrBuilder MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                    TII->get(AArch64::COPY), Dst)
                                .addReg(Src, getKillRegState(IsKill));
  LLVM_DEBUG(dbgs() << " adding copy: " << *MIB);
  ++NumCopiesInserted;
  return MIB;
}

// transformInstruction - Perform the transformation of an instruction
// to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
// to be the correct register class, minimizing cross-class copies.
void AArch64AdvSIMDScalar::transformInstruction(MachineInstr &MI) {
  LLVM_DEBUG(dbgs() << "Scalar transform: " << MI);

  MachineBasicBlock *MBB = MI.getParent();
  unsigned OldOpc = MI.getOpcode();
  unsigned NewOpc = getTransformOpcode(OldOpc);
  assert(OldOpc != NewOpc && "transform an instruction to itself?!");

  // Check if we need a copy for the source registers.
  Register OrigSrc0 = MI.getOperand(1).getReg();
  Register OrigSrc1 = MI.getOperand(2).getReg();
  unsigned Src0 = 0, SubReg0;
  unsigned Src1 = 0, SubReg1;
  bool KillSrc0 = false, KillSrc1 = false;
  if (!MRI->def_empty(OrigSrc0)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc0);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    MachineOperand *MOSrc0 = getSrcFromCopy(&*Def, MRI, SubReg0);
    // If there are no other users of the original source, we can delete
    // that instruction.
    if (MOSrc0) {
      Src0 = MOSrc0->getReg();
      KillSrc0 = MOSrc0->isKill();
      // Src0 is going to be reused, thus, it cannot be killed anymore.
      MOSrc0->setIsKill(false);
      if (MRI->hasOneNonDBGUse(OrigSrc0)) {
        assert(MOSrc0 && "Can't delete copy w/o a valid original source!");
        Def->eraseFromParent();
        ++NumCopiesDeleted;
      }
    }
  }
  if (!MRI->def_empty(OrigSrc1)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc1);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    MachineOperand *MOSrc1 = getSrcFromCopy(&*Def, MRI, SubReg1);
    // If there are no other users of the original source, we can delete
    // that instruction.
    if (MOSrc1) {
      Src1 = MOSrc1->getReg();
      KillSrc1 = MOSrc1->isKill();
      // Src1 is going to be reused, thus, it cannot be killed anymore.
      MOSrc1->setIsKill(false);
      if (MRI->hasOneNonDBGUse(OrigSrc1)) {
        assert(MOSrc1 && "Can't delete copy w/o a valid original source!");
        Def->eraseFromParent();
        ++NumCopiesDeleted;
      }
    }
  }
  // If we weren't able to reference the original source directly, create a
  // copy.
  if (!Src0) {
    SubReg0 = 0;
    Src0 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
    insertCopy(TII, MI, Src0, OrigSrc0, KillSrc0);
    KillSrc0 = true;
  }
  if (!Src1) {
    SubReg1 = 0;
    Src1 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
    insertCopy(TII, MI, Src1, OrigSrc1, KillSrc1);
    KillSrc1 = true;
  }

  // Create a vreg for the destination.
  // FIXME: No need to do this if the ultimate user expects an FPR64.
  // Check for that and avoid the copy if possible.
  Register Dst = MRI->createVirtualRegister(&AArch64::FPR64RegClass);

  // For now, all of the new instructions have the same simple three-register
  // form, so no need to special case based on what instruction we're
  // building.
  BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(NewOpc), Dst)
      .addReg(Src0, getKillRegState(KillSrc0), SubReg0)
      .addReg(Src1, getKillRegState(KillSrc1), SubReg1);

  // Now copy the result back out to a GPR.
  // FIXME: Try to avoid this if all uses could actually just use the FPR64
  // directly.
  insertCopy(TII, MI, MI.getOperand(0).getReg(), Dst, true);

  // Erase the old instruction.
  MI.eraseFromParent();

  ++NumScalarInsnsUsed;
}

// processMachineBasicBlock - Main optimization loop.
bool AArch64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
  bool Changed = false;
  for (MachineInstr &MI : llvm::make_early_inc_range(*MBB)) {
    if (isProfitableToTransform(MI)) {
      transformInstruction(MI);
      Changed = true;
    }
  }
  return Changed;
}

// runOnMachineFunction - Pass entry point from PassManager.
bool AArch64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n");

  if (skipFunction(mf.getFunction()))
    return false;

  MRI = &mf.getRegInfo();
  TII = mf.getSubtarget().getInstrInfo();

  // Just check things on a one-block-at-a-time basis.
  for (MachineBasicBlock &MBB : mf)
    if (processMachineBasicBlock(&MBB))
      Changed = true;
  return Changed;
}

// createAArch64AdvSIMDScalar - Factory function used by AArch64TargetMachine
// to add the pass to the PassManager.
FunctionPass *llvm::createAArch64AdvSIMDScalar() {
  return new AArch64AdvSIMDScalar();
}
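
// (Note: in the upstream AArch64 backend this pass is only scheduled by
// AArch64TargetMachine when AdvSIMD scalar usage is explicitly enabled; it
// does not run by default.)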