GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp
//===- ARMLegalizerInfo.cpp --------------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for ARM.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "ARMLegalizerInfo.h"
#include "ARMCallLowering.h"
#include "ARMSubtarget.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"

using namespace llvm;
using namespace LegalizeActions;

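// Matches the ARM EABI-based environments (bare-metal AEABI plus the GNU and
// musl Linux EABI targets), which provide the __aeabi_* runtime helpers used
// below for integer division/remainder and floating-point comparisons.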
static bool AEABI(const ARMSubtarget &ST) {
  return ST.isTargetAEABI() || ST.isTargetGNUAEABI() || ST.isTargetMuslAEABI();
}

ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
  using namespace TargetOpcode;

  const LLT p0 = LLT::pointer(0, 32);

  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  auto &LegacyInfo = getLegacyLegalizerInfo();
  if (ST.isThumb1Only()) {
    // Thumb1 is not supported yet.
    LegacyInfo.computeTables();
    verify(*ST.getInstrInfo());
    return;
  }

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
      .legalForCartesianProduct({s8, s16, s32}, {s1, s8, s16});

  getActionDefinitionsBuilder(G_SEXT_INREG).lower();

  getActionDefinitionsBuilder({G_MUL, G_AND, G_OR, G_XOR})
      .legalFor({s32})
      .clampScalar(0, s32, s32);

  if (ST.hasNEON())
    getActionDefinitionsBuilder({G_ADD, G_SUB})
        .legalFor({s32, s64})
        .minScalar(0, s32);
  else
    getActionDefinitionsBuilder({G_ADD, G_SUB})
        .legalFor({s32})
        .minScalar(0, s32);

  getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL})
      .legalFor({{s32, s32}})
      .minScalar(0, s32)
      .clampScalar(1, s32, s32);

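  // Use the hardware divide instructions (ARM or Thumb encodings) when the
  // subtarget has them; otherwise integer division becomes a libcall.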
  bool HasHWDivide = (!ST.isThumb() && ST.hasDivideInARMMode()) ||
                     (ST.isThumb() && ST.hasDivideInThumbMode());
  if (HasHWDivide)
    getActionDefinitionsBuilder({G_SDIV, G_UDIV})
        .legalFor({s32})
        .clampScalar(0, s32, s32);
  else
    getActionDefinitionsBuilder({G_SDIV, G_UDIV})
        .libcallFor({s32})
        .clampScalar(0, s32, s32);

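  // Remainder: expand via the divide instructions when they exist, call the
  // AEABI divmod helpers on AEABI targets (handled in legalizeCustom below),
  // and fall back to generic libcalls everywhere else.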
  auto &REMBuilder =
      getActionDefinitionsBuilder({G_SREM, G_UREM}).minScalar(0, s32);
  if (HasHWDivide)
    REMBuilder.lowerFor({s32});
  else if (AEABI(ST))
    REMBuilder.customFor({s32});
  else
    REMBuilder.libcallFor({s32});

  getActionDefinitionsBuilder(G_INTTOPTR)
      .legalFor({{p0, s32}})
      .minScalar(1, s32);
  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{s32, p0}})
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({s32, p0})
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s1}, {s32, p0})
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_SELECT)
      .legalForCartesianProduct({s32, p0}, {s1})
      .minScalar(0, s32);

  // We're keeping these builders around because we'll want to add support for
  // floating point to them.
  auto &LoadStoreBuilder = getActionDefinitionsBuilder({G_LOAD, G_STORE})
                               .legalForTypesWithMemDesc({{s8, p0, s8, 8},
                                                          {s16, p0, s16, 8},
                                                          {s32, p0, s32, 8},
                                                          {p0, p0, p0, 8}})
                               .unsupportedIfMemSizeNotPow2();

  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});
  getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0});

  auto &PhiBuilder =
      getActionDefinitionsBuilder(G_PHI)
          .legalFor({s32, p0})
          .minScalar(0, s32);

  getActionDefinitionsBuilder(G_PTR_ADD)
      .legalFor({{p0, s32}})
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_BRCOND).legalFor({s1});

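  // Floating point: with at least VFP2 most FP operations can be selected
  // directly; without an FPU (or with soft-float) they are lowered to runtime
  // library calls instead.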
  if (!ST.useSoftFloat() && ST.hasVFP2Base()) {
    getActionDefinitionsBuilder(
        {G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FCONSTANT, G_FNEG})
        .legalFor({s32, s64});

    LoadStoreBuilder
        .legalForTypesWithMemDesc({{s64, p0, s64, 32}})
        .maxScalar(0, s32);
    PhiBuilder.legalFor({s64});

    getActionDefinitionsBuilder(G_FCMP).legalForCartesianProduct({s1},
                                                                 {s32, s64});

    getActionDefinitionsBuilder(G_MERGE_VALUES).legalFor({{s64, s32}});
    getActionDefinitionsBuilder(G_UNMERGE_VALUES).legalFor({{s32, s64}});

    getActionDefinitionsBuilder(G_FPEXT).legalFor({{s64, s32}});
    getActionDefinitionsBuilder(G_FPTRUNC).legalFor({{s32, s64}});

    getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
        .legalForCartesianProduct({s32}, {s32, s64});
    getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
        .legalForCartesianProduct({s32, s64}, {s32});

    getActionDefinitionsBuilder({G_GET_FPENV, G_SET_FPENV, G_GET_FPMODE})
        .legalFor({s32});
    getActionDefinitionsBuilder(G_RESET_FPENV).alwaysLegal();
    getActionDefinitionsBuilder(G_SET_FPMODE).customFor({s32});
  } else {
    getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV})
        .libcallFor({s32, s64});

    LoadStoreBuilder.maxScalar(0, s32);

    getActionDefinitionsBuilder(G_FNEG).lowerFor({s32, s64});

    getActionDefinitionsBuilder(G_FCONSTANT).customFor({s32, s64});

    getActionDefinitionsBuilder(G_FCMP).customForCartesianProduct({s1},
                                                                  {s32, s64});

    if (AEABI(ST))
      setFCmpLibcallsAEABI();
    else
      setFCmpLibcallsGNU();

    getActionDefinitionsBuilder(G_FPEXT).libcallFor({{s64, s32}});
    getActionDefinitionsBuilder(G_FPTRUNC).libcallFor({{s32, s64}});

    getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
        .libcallForCartesianProduct({s32}, {s32, s64});
    getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
        .libcallForCartesianProduct({s32, s64}, {s32});

    getActionDefinitionsBuilder({G_GET_FPENV, G_SET_FPENV, G_RESET_FPENV})
        .libcall();
    getActionDefinitionsBuilder({G_GET_FPMODE, G_SET_FPMODE, G_RESET_FPMODE})
        .libcall();
  }

  // Just expand whatever loads and stores are left.
  LoadStoreBuilder.lower();

  if (!ST.useSoftFloat() && ST.hasVFP4Base())
    getActionDefinitionsBuilder(G_FMA).legalFor({s32, s64});
  else
    getActionDefinitionsBuilder(G_FMA).libcallFor({s32, s64});

  getActionDefinitionsBuilder({G_FREM, G_FPOW}).libcallFor({s32, s64});

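  // The CLZ instruction is only available from ARMv5T onwards, so older cores
  // have to lower or libcall the count-leading-zeros operations.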
  if (ST.hasV5TOps()) {
    getActionDefinitionsBuilder(G_CTLZ)
        .legalFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
    getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
        .lowerFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
  } else {
    getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
        .libcallFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
    getActionDefinitionsBuilder(G_CTLZ)
        .lowerFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
  }

  LegacyInfo.computeTables();
  verify(*ST.getInstrInfo());
}

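// On AEABI targets the __aeabi_*cmp* helpers already return exactly 0 or 1, so
// an entry with BAD_ICMP_PREDICATE only needs its result truncated; entries
// with ICMP_EQ invert an ordered helper by comparing its result against zero,
// and FCMP_ONE / FCMP_UEQ are built from two helper calls OR'd together.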
void ARMLegalizerInfo::setFCmpLibcallsAEABI() {
  // FCMP_TRUE and FCMP_FALSE don't need libcalls, they should be
  // default-initialized.
  FCmp32Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp32Libcalls[CmpInst::FCMP_OEQ] = {
      {RTLIB::OEQ_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OGE] = {
      {RTLIB::OGE_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OGT] = {
      {RTLIB::OGT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OLE] = {
      {RTLIB::OLE_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OLT] = {
      {RTLIB::OLT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::UO_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UNO] = {
      {RTLIB::UO_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_ONE] = {
      {RTLIB::OGT_F32, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::OLT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_UEQ] = {
      {RTLIB::OEQ_F32, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::UO_F32, CmpInst::BAD_ICMP_PREDICATE}};

  FCmp64Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp64Libcalls[CmpInst::FCMP_OEQ] = {
      {RTLIB::OEQ_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OGE] = {
      {RTLIB::OGE_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OGT] = {
      {RTLIB::OGT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OLE] = {
      {RTLIB::OLE_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OLT] = {
      {RTLIB::OLT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::UO_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UNO] = {
      {RTLIB::UO_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_ONE] = {
      {RTLIB::OGT_F64, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::OLT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_UEQ] = {
      {RTLIB::OEQ_F64, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::UO_F64, CmpInst::BAD_ICMP_PREDICATE}};
}

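// Non-AEABI (GNU) targets use the libgcc/compiler-rt comparison routines
// (__eqsf2, __gesf2, ...), which return a three-way value rather than a
// boolean, so every entry re-compares the libcall result against zero with the
// given integer predicate.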
void ARMLegalizerInfo::setFCmpLibcallsGNU() {
  // FCMP_TRUE and FCMP_FALSE don't need libcalls, they should be
  // default-initialized.
  FCmp32Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp32Libcalls[CmpInst::FCMP_OEQ] = {{RTLIB::OEQ_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_OGE] = {{RTLIB::OGE_F32, CmpInst::ICMP_SGE}};
  FCmp32Libcalls[CmpInst::FCMP_OGT] = {{RTLIB::OGT_F32, CmpInst::ICMP_SGT}};
  FCmp32Libcalls[CmpInst::FCMP_OLE] = {{RTLIB::OLE_F32, CmpInst::ICMP_SLE}};
  FCmp32Libcalls[CmpInst::FCMP_OLT] = {{RTLIB::OLT_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::UO_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F32, CmpInst::ICMP_SGE}};
  FCmp32Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F32, CmpInst::ICMP_SGT}};
  FCmp32Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F32, CmpInst::ICMP_SLE}};
  FCmp32Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F32, CmpInst::ICMP_NE}};
  FCmp32Libcalls[CmpInst::FCMP_UNO] = {{RTLIB::UO_F32, CmpInst::ICMP_NE}};
  FCmp32Libcalls[CmpInst::FCMP_ONE] = {{RTLIB::OGT_F32, CmpInst::ICMP_SGT},
                                       {RTLIB::OLT_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_UEQ] = {{RTLIB::OEQ_F32, CmpInst::ICMP_EQ},
                                       {RTLIB::UO_F32, CmpInst::ICMP_NE}};

  FCmp64Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp64Libcalls[CmpInst::FCMP_OEQ] = {{RTLIB::OEQ_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_OGE] = {{RTLIB::OGE_F64, CmpInst::ICMP_SGE}};
  FCmp64Libcalls[CmpInst::FCMP_OGT] = {{RTLIB::OGT_F64, CmpInst::ICMP_SGT}};
  FCmp64Libcalls[CmpInst::FCMP_OLE] = {{RTLIB::OLE_F64, CmpInst::ICMP_SLE}};
  FCmp64Libcalls[CmpInst::FCMP_OLT] = {{RTLIB::OLT_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::UO_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F64, CmpInst::ICMP_SGE}};
  FCmp64Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F64, CmpInst::ICMP_SGT}};
  FCmp64Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F64, CmpInst::ICMP_SLE}};
  FCmp64Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F64, CmpInst::ICMP_NE}};
  FCmp64Libcalls[CmpInst::FCMP_UNO] = {{RTLIB::UO_F64, CmpInst::ICMP_NE}};
  FCmp64Libcalls[CmpInst::FCMP_ONE] = {{RTLIB::OGT_F64, CmpInst::ICMP_SGT},
                                       {RTLIB::OLT_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_UEQ] = {{RTLIB::OEQ_F64, CmpInst::ICMP_EQ},
                                       {RTLIB::UO_F64, CmpInst::ICMP_NE}};
}

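// Returns the (libcall, follow-up integer predicate) pairs needed to lower a
// G_FCMP with the given FP predicate and operand size.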
ARMLegalizerInfo::FCmpLibcallsList
ARMLegalizerInfo::getFCmpLibcalls(CmpInst::Predicate Predicate,
                                  unsigned Size) const {
  assert(CmpInst::isFPPredicate(Predicate) && "Unsupported FCmp predicate");
  if (Size == 32)
    return FCmp32Libcalls[Predicate];
  if (Size == 64)
    return FCmp64Libcalls[Predicate];
  llvm_unreachable("Unsupported size for FCmp predicate");
}

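// Custom legalization for the operations marked customFor() above: AEABI
// srem/urem, soft-float G_FCMP and G_FCONSTANT, and G_SET_FPMODE. Returns
// false if the instruction could not be legalized here.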
bool ARMLegalizerInfo::legalizeCustom(LegalizerHelper &Helper, MachineInstr &MI,
                                      LostDebugLocObserver &LocObserver) const {
  using namespace TargetOpcode;

  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();

  switch (MI.getOpcode()) {
  default:
    return false;
  case G_SREM:
  case G_UREM: {
    Register OriginalResult = MI.getOperand(0).getReg();
    auto Size = MRI.getType(OriginalResult).getSizeInBits();
    if (Size != 32)
      return false;

    auto Libcall =
        MI.getOpcode() == G_SREM ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;

    // Our divmod libcalls return a struct containing the quotient and the
    // remainder. Create a new, unused register for the quotient and use the
    // destination of the original instruction for the remainder.
    Type *ArgTy = Type::getInt32Ty(Ctx);
    StructType *RetTy = StructType::get(Ctx, {ArgTy, ArgTy}, /* Packed */ true);
    Register RetRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          OriginalResult};
    auto Status = createLibcall(MIRBuilder, Libcall, {RetRegs, RetTy, 0},
                                {{MI.getOperand(1).getReg(), ArgTy, 0},
                                 {MI.getOperand(2).getReg(), ArgTy, 0}},
                                LocObserver, &MI);
    if (Status != LegalizerHelper::Legalized)
      return false;
    break;
  }
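  // Soft-float compares: each FP predicate maps to one or two runtime
  // comparison calls, set up earlier by setFCmpLibcallsAEABI/GNU.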
  case G_FCMP: {
    assert(MRI.getType(MI.getOperand(2).getReg()) ==
               MRI.getType(MI.getOperand(3).getReg()) &&
           "Mismatched operands for G_FCMP");
    auto OpSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();

    auto OriginalResult = MI.getOperand(0).getReg();
    auto Predicate =
        static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
    auto Libcalls = getFCmpLibcalls(Predicate, OpSize);

    if (Libcalls.empty()) {
      assert((Predicate == CmpInst::FCMP_TRUE ||
              Predicate == CmpInst::FCMP_FALSE) &&
             "Predicate needs libcalls, but none specified");
      MIRBuilder.buildConstant(OriginalResult,
                               Predicate == CmpInst::FCMP_TRUE ? 1 : 0);
      MI.eraseFromParent();
      return true;
    }

    assert((OpSize == 32 || OpSize == 64) && "Unsupported operand size");
    auto *ArgTy = OpSize == 32 ? Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx);
    auto *RetTy = Type::getInt32Ty(Ctx);

    SmallVector<Register, 2> Results;
    for (auto Libcall : Libcalls) {
      auto LibcallResult = MRI.createGenericVirtualRegister(LLT::scalar(32));
      auto Status = createLibcall(MIRBuilder, Libcall.LibcallID,
                                  {LibcallResult, RetTy, 0},
                                  {{MI.getOperand(2).getReg(), ArgTy, 0},
                                   {MI.getOperand(3).getReg(), ArgTy, 0}},
                                  LocObserver, &MI);

      if (Status != LegalizerHelper::Legalized)
        return false;

      auto ProcessedResult =
          Libcalls.size() == 1
              ? OriginalResult
              : MRI.createGenericVirtualRegister(MRI.getType(OriginalResult));

      // We have a result, but we need to transform it into a proper 1-bit 0 or
      // 1, taking into account the different peculiarities of the values
      // returned by the comparison functions.
      CmpInst::Predicate ResultPred = Libcall.Predicate;
      if (ResultPred == CmpInst::BAD_ICMP_PREDICATE) {
        // We have a nice 0 or 1, and we just need to truncate it back to 1 bit
        // to keep the types consistent.
        MIRBuilder.buildTrunc(ProcessedResult, LibcallResult);
      } else {
        // We need to compare against 0.
        assert(CmpInst::isIntPredicate(ResultPred) && "Unsupported predicate");
        auto Zero = MIRBuilder.buildConstant(LLT::scalar(32), 0);
        MIRBuilder.buildICmp(ResultPred, ProcessedResult, LibcallResult, Zero);
      }
      Results.push_back(ProcessedResult);
    }

    if (Results.size() != 1) {
      assert(Results.size() == 2 && "Unexpected number of results");
      MIRBuilder.buildOr(OriginalResult, Results[0], Results[1]);
    }
    break;
  }
  case G_FCONSTANT: {
    // Convert to integer constants, while preserving the binary representation.
    auto AsInteger =
        MI.getOperand(1).getFPImm()->getValueAPF().bitcastToAPInt();
    MIRBuilder.buildConstant(MI.getOperand(0),
                             *ConstantInt::get(Ctx, AsInteger));
    break;
  }
  case G_SET_FPMODE: {
    // New FPSCR = (FPSCR & FPStatusBits) | (Modes & ~FPStatusBits)
    LLT FPEnvTy = LLT::scalar(32);
    auto FPEnv = MRI.createGenericVirtualRegister(FPEnvTy);
    Register Modes = MI.getOperand(0).getReg();
    MIRBuilder.buildGetFPEnv(FPEnv);
    auto StatusBitMask = MIRBuilder.buildConstant(FPEnvTy, ARM::FPStatusBits);
    auto StatusBits = MIRBuilder.buildAnd(FPEnvTy, FPEnv, StatusBitMask);
    auto NotStatusBitMask =
        MIRBuilder.buildConstant(FPEnvTy, ~ARM::FPStatusBits);
    auto FPModeBits = MIRBuilder.buildAnd(FPEnvTy, Modes, NotStatusBitMask);
    auto NewFPSCR = MIRBuilder.buildOr(FPEnvTy, StatusBits, FPModeBits);
    MIRBuilder.buildSetFPEnv(NewFPSCR);
    break;
  }
  }

  MI.eraseFromParent();
  return true;
}