Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/llvm/lib/Target/X86/GISel/X86RegisterBankInfo.cpp
96383 views
1
//===- X86RegisterBankInfo.cpp -----------------------------------*- C++ -*-==//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
/// \file
9
/// This file implements the targeting of the RegisterBankInfo class for X86.
10
/// \todo This should be generated by TableGen.
11
//===----------------------------------------------------------------------===//
12
13
#include "X86RegisterBankInfo.h"
14
#include "X86InstrInfo.h"
15
#include "X86Subtarget.h"
16
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
17
#include "llvm/CodeGen/GlobalISel/Utils.h"
18
#include "llvm/CodeGen/MachineRegisterInfo.h"
19
#include "llvm/CodeGen/RegisterBank.h"
20
#include "llvm/CodeGen/RegisterBankInfo.h"
21
#include "llvm/CodeGen/TargetRegisterInfo.h"
22
#include "llvm/IR/IntrinsicsX86.h"
23
24
#define GET_TARGET_REGBANK_IMPL
25
#include "X86GenRegisterBank.inc"
26
27
using namespace llvm;
28
// This file will be TableGen'ed at some point.
29
#define GET_TARGET_REGBANK_INFO_IMPL
30
#include "X86GenRegisterBankInfo.def"
31
32
X86RegisterBankInfo::X86RegisterBankInfo(const TargetRegisterInfo &TRI) {

  // Sanity-check the statically generated register-bank tables.
  const RegisterBank &GPRBank = getRegBank(X86::GPRRegBankID);
  (void)GPRBank;
  assert(&X86::GPRRegBank == &GPRBank && "Incorrect RegBanks inizalization.");

  // The GPR bank must cover GR64 and all of its subclasses, and no GPR is
  // wider than 64 bits.
  assert(GPRBank.covers(*TRI.getRegClass(X86::GR64RegClassID)) &&
         "Subclass not added?");
  assert(getMaximumSize(GPRBank.getID()) == 64 &&
         "GPRs should hold up to 64-bit");
}
46
47
const RegisterBank &
X86RegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
                                            LLT) const {

  // Classify the register class into one of the three X86 banks by checking
  // which known class hierarchy contains it.
  const bool IsGPRClass = X86::GR8RegClass.hasSubClassEq(&RC) ||
                          X86::GR16RegClass.hasSubClassEq(&RC) ||
                          X86::GR32RegClass.hasSubClassEq(&RC) ||
                          X86::GR64RegClass.hasSubClassEq(&RC) ||
                          X86::LOW32_ADDR_ACCESSRegClass.hasSubClassEq(&RC) ||
                          X86::LOW32_ADDR_ACCESS_RBPRegClass.hasSubClassEq(&RC);
  if (IsGPRClass)
    return getRegBank(X86::GPRRegBankID);

  const bool IsVectorClass = X86::FR32XRegClass.hasSubClassEq(&RC) ||
                             X86::FR64XRegClass.hasSubClassEq(&RC) ||
                             X86::VR128XRegClass.hasSubClassEq(&RC) ||
                             X86::VR256XRegClass.hasSubClassEq(&RC) ||
                             X86::VR512RegClass.hasSubClassEq(&RC);
  if (IsVectorClass)
    return getRegBank(X86::VECRRegBankID);

  const bool IsX87Class = X86::RFP80RegClass.hasSubClassEq(&RC) ||
                          X86::RFP32RegClass.hasSubClassEq(&RC) ||
                          X86::RFP64RegClass.hasSubClassEq(&RC);
  if (IsX87Class)
    return getRegBank(X86::PSRRegBankID);

  llvm_unreachable("Unsupported register kind yet.");
}
73
74
// \returns true if a given intrinsic only uses and defines FPRs.
75
static bool isFPIntrinsic(const MachineRegisterInfo &MRI,
76
const MachineInstr &MI) {
77
// TODO: Add more intrinsics.
78
switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
79
default:
80
return false;
81
// SSE1
82
case Intrinsic::x86_sse_rcp_ss:
83
case Intrinsic::x86_sse_rcp_ps:
84
case Intrinsic::x86_sse_rsqrt_ss:
85
case Intrinsic::x86_sse_rsqrt_ps:
86
case Intrinsic::x86_sse_min_ss:
87
case Intrinsic::x86_sse_min_ps:
88
case Intrinsic::x86_sse_max_ss:
89
case Intrinsic::x86_sse_max_ps:
90
return true;
91
}
92
return false;
93
}
94
95
// Returns true if \p MI is constrained to floating-point register banks,
// either directly (FP opcode / FP-only intrinsic), via an already-assigned
// bank, or — for PHIs — inferred recursively from its inputs up to
// MaxFPRSearchDepth.
bool X86RegisterBankInfo::hasFPConstraints(const MachineInstr &MI,
                                           const MachineRegisterInfo &MRI,
                                           const TargetRegisterInfo &TRI,
                                           unsigned Depth) const {
  unsigned Op = MI.getOpcode();
  // FP-only intrinsics use and define FPRs exclusively.
  if (Op == TargetOpcode::G_INTRINSIC && isFPIntrinsic(MRI, MI))
    return true;

  // Do we have an explicit floating point instruction?
  if (isPreISelGenericFloatingPointOpcode(Op))
    return true;

  // No. Check if we have a copy-like instruction. If we do, then we could
  // still be fed by floating point instructions.
  if (Op != TargetOpcode::COPY && !MI.isPHI() &&
      !isPreISelGenericOptimizationHint(Op))
    return false;

  // Check if we already know the register bank.
  auto *RB = getRegBank(MI.getOperand(0).getReg(), MRI, TRI);
  if (RB == &getRegBank(X86::PSRRegBankID))
    return true;
  if (RB == &getRegBank(X86::GPRRegBankID))
    return false;

  // We don't know anything.
  //
  // If we have a phi, we may be able to infer that it will be assigned a fp
  // type based off of its inputs.
  if (!MI.isPHI() || Depth > MaxFPRSearchDepth)
    return false;

  // Treat the PHI as FP if any of its register inputs is produced by an
  // instruction that only defines FP values (depth-limited recursion).
  return any_of(MI.explicit_uses(), [&](const MachineOperand &Op) {
    return Op.isReg() &&
           onlyDefinesFP(*MRI.getVRegDef(Op.getReg()), MRI, TRI, Depth + 1);
  });
}
132
133
bool X86RegisterBankInfo::onlyUsesFP(const MachineInstr &MI,
134
const MachineRegisterInfo &MRI,
135
const TargetRegisterInfo &TRI,
136
unsigned Depth) const {
137
switch (MI.getOpcode()) {
138
case TargetOpcode::G_FPTOSI:
139
case TargetOpcode::G_FPTOUI:
140
case TargetOpcode::G_FCMP:
141
case TargetOpcode::G_LROUND:
142
case TargetOpcode::G_LLROUND:
143
case TargetOpcode::G_INTRINSIC_TRUNC:
144
case TargetOpcode::G_INTRINSIC_ROUND:
145
return true;
146
default:
147
break;
148
}
149
return hasFPConstraints(MI, MRI, TRI, Depth);
150
}
151
152
bool X86RegisterBankInfo::onlyDefinesFP(const MachineInstr &MI,
153
const MachineRegisterInfo &MRI,
154
const TargetRegisterInfo &TRI,
155
unsigned Depth) const {
156
switch (MI.getOpcode()) {
157
case TargetOpcode::G_SITOFP:
158
case TargetOpcode::G_UITOFP:
159
return true;
160
default:
161
break;
162
}
163
return hasFPConstraints(MI, MRI, TRI, Depth);
164
}
165
166
// Map a value type (and whether it is used as floating point) to the partial
// mapping index that selects its register bank and size.
X86GenRegisterBankInfo::PartialMappingIdx
X86GenRegisterBankInfo::getPartialMappingIdx(const MachineInstr &MI,
                                             const LLT &Ty, bool isFP) {
  const MachineFunction *MF = MI.getMF();
  const X86Subtarget *ST = &MF->getSubtarget<X86Subtarget>();
  bool HasSSE1 = ST->hasSSE1();
  bool HasSSE2 = ST->hasSSE2();
  // 80 bits is only generated for X87 floating points.
  if (Ty.getSizeInBits() == 80)
    isFP = true;
  if ((Ty.isScalar() && !isFP) || Ty.isPointer()) {
    // Integer scalars and pointers go to the GPR bank by size.
    switch (Ty.getSizeInBits()) {
    case 1:
    case 8:
      return PMI_GPR8;
    case 16:
      return PMI_GPR16;
    case 32:
      return PMI_GPR32;
    case 64:
      return PMI_GPR64;
    case 128:
      return PMI_VEC128;
      // Note: a dead 'break;' that followed this return has been removed.
    default:
      llvm_unreachable("Unsupported register size.");
    }
  } else if (Ty.isScalar()) {
    // FP scalars: use SSE banks when available, otherwise x87 (PSR).
    switch (Ty.getSizeInBits()) {
    case 32:
      return HasSSE1 ? PMI_FP32 : PMI_PSR32;
    case 64:
      return HasSSE2 ? PMI_FP64 : PMI_PSR64;
    case 128:
      return PMI_VEC128;
    case 80:
      return PMI_PSR80;
    default:
      llvm_unreachable("Unsupported register size.");
    }
  } else {
    // Vectors map onto the 128/256/512-bit vector banks.
    switch (Ty.getSizeInBits()) {
    case 128:
      return PMI_VEC128;
    case 256:
      return PMI_VEC256;
    case 512:
      return PMI_VEC512;
    default:
      llvm_unreachable("Unsupported register size.");
    }
  }

  // Unreachable: every branch above returns or aborts.
  return PMI_None;
}
221
222
void X86RegisterBankInfo::getInstrPartialMappingIdxs(
223
const MachineInstr &MI, const MachineRegisterInfo &MRI, const bool isFP,
224
SmallVectorImpl<PartialMappingIdx> &OpRegBankIdx) {
225
226
unsigned NumOperands = MI.getNumOperands();
227
for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
228
auto &MO = MI.getOperand(Idx);
229
if (!MO.isReg() || !MO.getReg())
230
OpRegBankIdx[Idx] = PMI_None;
231
else
232
OpRegBankIdx[Idx] =
233
getPartialMappingIdx(MI, MRI.getType(MO.getReg()), isFP);
234
}
235
}
236
237
bool X86RegisterBankInfo::getInstrValueMapping(
238
const MachineInstr &MI,
239
const SmallVectorImpl<PartialMappingIdx> &OpRegBankIdx,
240
SmallVectorImpl<const ValueMapping *> &OpdsMapping) {
241
242
unsigned NumOperands = MI.getNumOperands();
243
for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
244
if (!MI.getOperand(Idx).isReg())
245
continue;
246
if (!MI.getOperand(Idx).getReg())
247
continue;
248
249
auto Mapping = getValueMapping(OpRegBankIdx[Idx], 1);
250
if (!Mapping->isValid())
251
return false;
252
253
OpdsMapping[Idx] = Mapping;
254
}
255
return true;
256
}
257
258
const RegisterBankInfo::InstructionMapping &
X86RegisterBankInfo::getSameOperandsMapping(const MachineInstr &MI,
                                            bool isFP) const {
  // All three operands (def + two uses) must share a single type; they all
  // receive the same partial mapping.
  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();

  const unsigned NumOperands = MI.getNumOperands();
  const LLT Ty = MRI.getType(MI.getOperand(0).getReg());

  if (NumOperands != 3 || Ty != MRI.getType(MI.getOperand(1).getReg()) ||
      Ty != MRI.getType(MI.getOperand(2).getReg()))
    llvm_unreachable("Unsupported operand mapping yet.");

  const ValueMapping *Mapping =
      getValueMapping(getPartialMappingIdx(MI, Ty, isFP), 3);
  return getInstructionMapping(DefaultMappingID, 1, Mapping, NumOperands);
}
274
275
// Compute the default register-bank mapping for \p MI: handle same-type
// arithmetic and shifts directly, then fine-tune the per-operand mapping for
// opcodes that mix GPR and FP operands, and fall back to an all-GPR mapping.
const RegisterBankInfo::InstructionMapping &
X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned Opc = MI.getOpcode();

  // Try the default logic for non-generic instructions that are either
  // copies or already have some operands assigned to banks.
  if (!isPreISelGenericOpcode(Opc) || Opc == TargetOpcode::G_PHI) {
    const InstructionMapping &Mapping = getInstrMappingImpl(MI);
    if (Mapping.isValid())
      return Mapping;
  }

  switch (Opc) {
  // Integer arithmetic: all operands share one type and one (GPR) mapping.
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_MUL:
    return getSameOperandsMapping(MI, false);
  // FP arithmetic: all operands share one type and one FP mapping.
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
    return getSameOperandsMapping(MI, true);
  // Shifts: map all three operands from the destination type (GPR).
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR: {
    unsigned NumOperands = MI.getNumOperands();
    LLT Ty = MRI.getType(MI.getOperand(0).getReg());

    auto Mapping = getValueMapping(getPartialMappingIdx(MI, Ty, false), 3);
    return getInstructionMapping(DefaultMappingID, 1, Mapping, NumOperands);
  }
  default:
    break;
  }

  unsigned NumOperands = MI.getNumOperands();
  SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);

  switch (Opc) {
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FCONSTANT:
    // Instruction having only floating-point operands (all scalars in
    // VECRReg)
    getInstrPartialMappingIdxs(MI, MRI, /* isFP= */ true, OpRegBankIdx);
    break;
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_FPTOSI: {
    // Some of the floating-point instructions have mixed GPR and FP
    // operands: fine-tune the computed mapping.
    auto &Op0 = MI.getOperand(0);
    auto &Op1 = MI.getOperand(1);
    const LLT Ty0 = MRI.getType(Op0.getReg());
    const LLT Ty1 = MRI.getType(Op1.getReg());

    // SITOFP: FP result, integer source. FPTOSI: integer result, FP source.
    bool FirstArgIsFP = Opc == TargetOpcode::G_SITOFP;
    bool SecondArgIsFP = Opc == TargetOpcode::G_FPTOSI;
    OpRegBankIdx[0] = getPartialMappingIdx(MI, Ty0, /* isFP= */ FirstArgIsFP);
    OpRegBankIdx[1] = getPartialMappingIdx(MI, Ty1, /* isFP= */ SecondArgIsFP);
    break;
  }
  case TargetOpcode::G_FCMP: {
    // Boolean (GPR8) result, predicate immediate, two same-size FP sources.
    LLT Ty1 = MRI.getType(MI.getOperand(2).getReg());
    LLT Ty2 = MRI.getType(MI.getOperand(3).getReg());
    (void)Ty2;
    assert(Ty1.getSizeInBits() == Ty2.getSizeInBits() &&
           "Mismatched operand sizes for G_FCMP");

    unsigned Size = Ty1.getSizeInBits();
    (void)Size;
    assert((Size == 32 || Size == 64) && "Unsupported size for G_FCMP");

    auto FpRegBank = getPartialMappingIdx(MI, Ty1, /* isFP= */ true);
    OpRegBankIdx = {PMI_GPR8,
                    /* Predicate */ PMI_None, FpRegBank, FpRegBank};
    break;
  }
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_ANYEXT: {
    auto &Op0 = MI.getOperand(0);
    auto &Op1 = MI.getOperand(1);
    const LLT Ty0 = MRI.getType(Op0.getReg());
    const LLT Ty1 = MRI.getType(Op1.getReg());

    // A 128-bit <-> 32/64-bit trunc/anyext moves between a vector register
    // and an FP scalar, so treat those as FP.
    bool isFPTrunc = (Ty0.getSizeInBits() == 32 || Ty0.getSizeInBits() == 64) &&
                     Ty1.getSizeInBits() == 128 && Opc == TargetOpcode::G_TRUNC;
    bool isFPAnyExt =
        Ty0.getSizeInBits() == 128 &&
        (Ty1.getSizeInBits() == 32 || Ty1.getSizeInBits() == 64) &&
        Opc == TargetOpcode::G_ANYEXT;

    getInstrPartialMappingIdxs(MI, MRI, /* isFP= */ isFPTrunc || isFPAnyExt,
                               OpRegBankIdx);
    break;
  }
  case TargetOpcode::G_LOAD: {
    // Check if that load feeds fp instructions.
    // In that case, we want the default mapping to be on FPR
    // instead of blind map every scalar to GPR.
    bool IsFP = any_of(MRI.use_nodbg_instructions(cast<GLoad>(MI).getDstReg()),
                       [&](const MachineInstr &UseMI) {
                         // If we have at least one direct use in a FP
                         // instruction, assume this was a floating point load
                         // in the IR. If it was not, we would have had a
                         // bitcast before reaching that instruction.
                         return onlyUsesFP(UseMI, MRI, TRI);
                       });
    getInstrPartialMappingIdxs(MI, MRI, IsFP, OpRegBankIdx);
    break;
  }
  case TargetOpcode::G_STORE: {
    // Check if that store is fed by fp instructions.
    Register VReg = cast<GStore>(MI).getValueReg();
    if (!VReg)
      break;
    MachineInstr *DefMI = MRI.getVRegDef(VReg);
    bool IsFP = onlyDefinesFP(*DefMI, MRI, TRI);
    getInstrPartialMappingIdxs(MI, MRI, IsFP, OpRegBankIdx);
    break;
  }
  default:
    // Track the bank of each register, use NotFP mapping (all scalars in
    // GPRs)
    getInstrPartialMappingIdxs(MI, MRI, /* isFP= */ false, OpRegBankIdx);
    break;
  }

  // Finally construct the computed mapping.
  SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
  if (!getInstrValueMapping(MI, OpRegBankIdx, OpdsMapping))
    return getInvalidInstructionMapping();

  return getInstructionMapping(DefaultMappingID, /* Cost */ 1,
                               getOperandsMapping(OpdsMapping), NumOperands);
}
414
415
void X86RegisterBankInfo::applyMappingImpl(
416
MachineIRBuilder &Builder, const OperandsMapper &OpdMapper) const {
417
return applyDefaultMapping(OpdMapper);
418
}
419
420
// Offer an alternative all-FP-bank mapping for loads/stores/implicit-defs of
// 32/64/80-bit values, in addition to the default mapping.
RegisterBankInfo::InstructionMappings
X86RegisterBankInfo::getInstrAlternativeMappings(const MachineInstr &MI) const {

  const MachineFunction &MF = *MI.getParent()->getParent();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  switch (MI.getOpcode()) {
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_IMPLICIT_DEF: {
    // Try to map 32/64/80-bit values onto the FP banks (the partial-mapping
    // choice is made by getInstrPartialMappingIdxs with isFP=true).
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
    if (Size != 32 && Size != 64 && Size != 80)
      break;

    unsigned NumOperands = MI.getNumOperands();

    // Track the bank of each register, use FP mapping (all scalars in VEC)
    SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
    getInstrPartialMappingIdxs(MI, MRI, /* isFP= */ true, OpRegBankIdx);

    // Finally construct the computed mapping. If any operand mapping is
    // invalid, offer no alternative.
    SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
    if (!getInstrValueMapping(MI, OpRegBankIdx, OpdsMapping))
      break;

    const RegisterBankInfo::InstructionMapping &Mapping = getInstructionMapping(
        /*ID*/ 1, /*Cost*/ 1, getOperandsMapping(OpdsMapping), NumOperands);
    InstructionMappings AltMappings;
    AltMappings.push_back(&Mapping);
    return AltMappings;
  }
  default:
    break;
  }
  // Fall back to the target-independent alternatives.
  return RegisterBankInfo::getInstrAlternativeMappings(MI);
}
459
460