// Source: freebsd/freebsd-src, contrib/llvm-project/llvm/lib/IR/IRBuilder.cpp
//===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the IRBuilder class, which is used as a convenient way
// to create LLVM instructions with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//
14
#include "llvm/IR/IRBuilder.h"
15
#include "llvm/ADT/ArrayRef.h"
16
#include "llvm/IR/Constant.h"
17
#include "llvm/IR/Constants.h"
18
#include "llvm/IR/DebugInfoMetadata.h"
19
#include "llvm/IR/DerivedTypes.h"
20
#include "llvm/IR/Function.h"
21
#include "llvm/IR/GlobalValue.h"
22
#include "llvm/IR/GlobalVariable.h"
23
#include "llvm/IR/IntrinsicInst.h"
24
#include "llvm/IR/Intrinsics.h"
25
#include "llvm/IR/LLVMContext.h"
26
#include "llvm/IR/Module.h"
27
#include "llvm/IR/NoFolder.h"
28
#include "llvm/IR/Operator.h"
29
#include "llvm/IR/Statepoint.h"
30
#include "llvm/IR/Type.h"
31
#include "llvm/IR/Value.h"
32
#include "llvm/Support/Casting.h"
33
#include <cassert>
34
#include <cstdint>
35
#include <optional>
36
#include <vector>
37
38
using namespace llvm;
39
40
/// CreateGlobalString - Make a new global variable with an initializer that
41
/// has array of i8 type filled in with the nul terminated string value
42
/// specified. If Name is specified, it is the name of the global variable
43
/// created.
44
GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
45
const Twine &Name,
46
unsigned AddressSpace,
47
Module *M, bool AddNull) {
48
Constant *StrConstant = ConstantDataArray::getString(Context, Str, AddNull);
49
if (!M)
50
M = BB->getParent()->getParent();
51
auto *GV = new GlobalVariable(
52
*M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
53
StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
54
GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
55
GV->setAlignment(Align(1));
56
return GV;
57
}
58
59
Type *IRBuilderBase::getCurrentFunctionReturnType() const {
60
assert(BB && BB->getParent() && "No current function!");
61
return BB->getParent()->getReturnType();
62
}
63
64
DebugLoc IRBuilderBase::getCurrentDebugLocation() const {
65
for (auto &KV : MetadataToCopy)
66
if (KV.first == LLVMContext::MD_dbg)
67
return {cast<DILocation>(KV.second)};
68
69
return {};
70
}
71
void IRBuilderBase::SetInstDebugLocation(Instruction *I) const {
72
for (const auto &KV : MetadataToCopy)
73
if (KV.first == LLVMContext::MD_dbg) {
74
I->setDebugLoc(DebugLoc(KV.second));
75
return;
76
}
77
}
78
79
CallInst *
80
IRBuilderBase::createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
81
const Twine &Name, Instruction *FMFSource,
82
ArrayRef<OperandBundleDef> OpBundles) {
83
CallInst *CI = CreateCall(Callee, Ops, OpBundles, Name);
84
if (FMFSource)
85
CI->copyFastMathFlags(FMFSource);
86
return CI;
87
}
88
89
Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
90
assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
91
if (cast<ConstantInt>(Scaling)->isZero())
92
return Scaling;
93
Module *M = GetInsertBlock()->getParent()->getParent();
94
Function *TheFn =
95
Intrinsic::getDeclaration(M, Intrinsic::vscale, {Scaling->getType()});
96
CallInst *CI = CreateCall(TheFn, {}, {}, Name);
97
return cast<ConstantInt>(Scaling)->isOne() ? CI : CreateMul(CI, Scaling);
98
}
99
100
Value *IRBuilderBase::CreateElementCount(Type *DstType, ElementCount EC) {
101
Constant *MinEC = ConstantInt::get(DstType, EC.getKnownMinValue());
102
return EC.isScalable() ? CreateVScale(MinEC) : MinEC;
103
}
104
105
Value *IRBuilderBase::CreateTypeSize(Type *DstType, TypeSize Size) {
106
Constant *MinSize = ConstantInt::get(DstType, Size.getKnownMinValue());
107
return Size.isScalable() ? CreateVScale(MinSize) : MinSize;
108
}
109
110
/// Produce a vector of type \p DstType whose lanes hold <0, 1, 2, ...>.
/// Scalable vectors use the stepvector intrinsic; fixed vectors are emitted
/// as a constant vector directly.
Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) {
  Type *EltTy = DstType->getScalarType();

  if (isa<ScalableVectorType>(DstType)) {
    // The intrinsic does not yet support element types narrower than 8 bits,
    // so widen to i8 lanes and truncate afterwards.
    // TODO: We expect this special case (element type < 8 bits) to be
    // temporary - once the intrinsic properly supports < 8 bits this code
    // can be removed.
    Type *StepVecType = DstType;
    if (EltTy->getScalarSizeInBits() < 8)
      StepVecType =
          VectorType::get(getInt8Ty(), cast<ScalableVectorType>(DstType));
    Value *Res = CreateIntrinsic(Intrinsic::experimental_stepvector,
                                 {StepVecType}, {}, nullptr, Name);
    if (StepVecType != DstType)
      Res = CreateTrunc(Res, DstType);
    return Res;
  }

  // Fixed-width case: build the constant vector <0, 1, ..., N-1>.
  unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();
  SmallVector<Constant *, 8> Steps;
  for (unsigned Lane = 0; Lane != NumEls; ++Lane)
    Steps.push_back(ConstantInt::get(EltTy, Lane));
  return ConstantVector::get(Steps);
}
/// Emit a call to llvm.memset, optionally setting the destination alignment
/// and attaching TBAA / alias-scope / noalias metadata.
CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
                                      MaybeAlign Align, bool isVolatile,
                                      MDNode *TBAATag, MDNode *ScopeTag,
                                      MDNode *NoAliasTag) {
  // llvm.memset is overloaded on the destination pointer and size types.
  Module *Mod = BB->getParent()->getParent();
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Function *MemSetFn = Intrinsic::getDeclaration(Mod, Intrinsic::memset, Tys);

  Value *Args[] = {Ptr, Val, Size, getInt1(isVolatile)};
  CallInst *Call = CreateCall(MemSetFn, Args);

  if (Align)
    cast<MemSetInst>(Call)->setDestAlignment(*Align);

  // Attach any requested aliasing metadata.
  if (TBAATag)
    Call->setMetadata(LLVMContext::MD_tbaa, TBAATag);
  if (ScopeTag)
    Call->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
  if (NoAliasTag)
    Call->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return Call;
}
/// Emit a call to llvm.memset.inline, optionally setting the destination
/// alignment and attaching TBAA / alias-scope / noalias metadata.
CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign,
                                            Value *Val, Value *Size,
                                            bool IsVolatile, MDNode *TBAATag,
                                            MDNode *ScopeTag,
                                            MDNode *NoAliasTag) {
  // llvm.memset.inline is overloaded on the destination pointer and size
  // types.
  Module *Mod = BB->getParent()->getParent();
  Type *Tys[] = {Dst->getType(), Size->getType()};
  Function *MemSetFn =
      Intrinsic::getDeclaration(Mod, Intrinsic::memset_inline, Tys);

  Value *Args[] = {Dst, Val, Size, getInt1(IsVolatile)};
  CallInst *Call = CreateCall(MemSetFn, Args);

  if (DstAlign)
    cast<MemSetInlineInst>(Call)->setDestAlignment(*DstAlign);

  // Attach any requested aliasing metadata.
  if (TBAATag)
    Call->setMetadata(LLVMContext::MD_tbaa, TBAATag);
  if (ScopeTag)
    Call->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
  if (NoAliasTag)
    Call->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return Call;
}
/// Emit a call to llvm.memset.element.unordered.atomic with the given
/// destination alignment and element size, attaching any requested aliasing
/// metadata.
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
    Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
    MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  // The intrinsic is overloaded on the destination pointer and size types.
  Module *Mod = BB->getParent()->getParent();
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Function *MemSetFn = Intrinsic::getDeclaration(
      Mod, Intrinsic::memset_element_unordered_atomic, Tys);

  Value *Args[] = {Ptr, Val, Size, getInt32(ElementSize)};
  CallInst *Call = CreateCall(MemSetFn, Args);

  cast<AtomicMemSetInst>(Call)->setDestAlignment(Alignment);

  // Attach any requested aliasing metadata.
  if (TBAATag)
    Call->setMetadata(LLVMContext::MD_tbaa, TBAATag);
  if (ScopeTag)
    Call->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
  if (NoAliasTag)
    Call->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return Call;
}
/// Emit a memcpy / memcpy.inline / memmove call (selected by \p IntrID),
/// setting source/destination alignments when provided and attaching
/// TBAA / tbaa.struct / alias-scope / noalias metadata.
CallInst *IRBuilderBase::CreateMemTransferInst(
    Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
    MaybeAlign SrcAlign, Value *Size, bool isVolatile, MDNode *TBAATag,
    MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert((IntrID == Intrinsic::memcpy || IntrID == Intrinsic::memcpy_inline ||
          IntrID == Intrinsic::memmove) &&
         "Unexpected intrinsic ID");
  // These intrinsics are overloaded on the two pointer types and the size
  // type.
  Module *Mod = BB->getParent()->getParent();
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Function *TransferFn = Intrinsic::getDeclaration(Mod, IntrID, Tys);

  Value *Args[] = {Dst, Src, Size, getInt1(isVolatile)};
  CallInst *Call = CreateCall(TransferFn, Args);

  auto *MTI = cast<MemTransferInst>(Call);
  if (DstAlign)
    MTI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MTI->setSourceAlignment(*SrcAlign);

  // Attach any requested aliasing metadata.
  if (TBAATag)
    Call->setMetadata(LLVMContext::MD_tbaa, TBAATag);
  if (TBAAStructTag)
    Call->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
  if (ScopeTag)
    Call->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
  if (NoAliasTag)
    Call->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return Call;
}
/// Emit a call to llvm.memcpy.element.unordered.atomic. Both pointer
/// alignments must be at least \p ElementSize. Attaches any requested
/// aliasing metadata.
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  // The intrinsic is overloaded on the two pointer types and the size type.
  Module *Mod = BB->getParent()->getParent();
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Function *MemCpyFn = Intrinsic::getDeclaration(
      Mod, Intrinsic::memcpy_element_unordered_atomic, Tys);

  Value *Args[] = {Dst, Src, Size, getInt32(ElementSize)};
  CallInst *Call = CreateCall(MemCpyFn, Args);

  // Record the alignment of both pointer arguments.
  auto *AMCI = cast<AtomicMemCpyInst>(Call);
  AMCI->setDestAlignment(DstAlign);
  AMCI->setSourceAlignment(SrcAlign);

  // Attach any requested aliasing metadata.
  if (TBAATag)
    Call->setMetadata(LLVMContext::MD_tbaa, TBAATag);
  if (TBAAStructTag)
    Call->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
  if (ScopeTag)
    Call->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
  if (NoAliasTag)
    Call->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return Call;
}
/// isConstantOne - Return true only if val is constant int 1
296
static bool isConstantOne(const Value *Val) {
297
assert(Val && "isConstantOne does not work with nullptr Val");
298
const ConstantInt *CVal = dyn_cast<ConstantInt>(Val);
299
return CVal && CVal->isOne();
300
}
301
302
/// Emit a call to malloc sized for \p AllocSize * \p ArraySize bytes.
/// If \p MallocF is null, "void *malloc(size_t)" is declared on demand.
/// NOTE(review): \p AllocTy is not consulted here — presumably retained for
/// interface compatibility with callers/overloads.
CallInst *IRBuilderBase::CreateMalloc(Type *IntPtrTy, Type *AllocTy,
                                      Value *AllocSize, Value *ArraySize,
                                      ArrayRef<OperandBundleDef> OpB,
                                      Function *MallocF, const Twine &Name) {
  // malloc(type) becomes:
  //   i8* malloc(typeSize)
  // malloc(type, arraySize) becomes:
  //   i8* malloc(typeSize*arraySize)
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy)
    ArraySize = CreateIntCast(ArraySize, IntPtrTy, false);

  // Fold the size computation when either operand is the constant 1.
  if (!isConstantOne(ArraySize)) {
    if (isConstantOne(AllocSize)) {
      AllocSize = ArraySize; // Operand * 1 = Operand
    } else {
      AllocSize = CreateMul(ArraySize, AllocSize, "mallocsize");
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  Module *Mod = BB->getParent()->getParent();
  Type *BPTy = PointerType::getUnqual(Context);
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = Mod->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  CallInst *MCall = CreateCall(MallocFunc, AllocSize, OpB, Name);

  MCall->setTailCall();
  if (auto *F = dyn_cast<Function>(MallocFunc.getCallee())) {
    MCall->setCallingConv(F->getCallingConv());
    // malloc's result never aliases other pointers.
    F->setReturnDoesNotAlias();
  }

  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");
  return MCall;
}

/// Convenience overload of CreateMalloc without operand bundles.
CallInst *IRBuilderBase::CreateMalloc(Type *IntPtrTy, Type *AllocTy,
                                      Value *AllocSize, Value *ArraySize,
                                      Function *MallocF, const Twine &Name) {
  return CreateMalloc(IntPtrTy, AllocTy, AllocSize, ArraySize, std::nullopt,
                      MallocF, Name);
}
/// CreateFree - Generate the IR for a call to the builtin free function.
354
CallInst *IRBuilderBase::CreateFree(Value *Source,
355
ArrayRef<OperandBundleDef> Bundles) {
356
assert(Source->getType()->isPointerTy() &&
357
"Can not free something of nonpointer type!");
358
359
Module *M = BB->getParent()->getParent();
360
361
Type *VoidTy = Type::getVoidTy(M->getContext());
362
Type *VoidPtrTy = PointerType::getUnqual(M->getContext());
363
// prototype free as "void free(void*)"
364
FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, VoidPtrTy);
365
CallInst *Result = CreateCall(FreeFunc, Source, Bundles, "");
366
Result->setTailCall();
367
if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
368
Result->setCallingConv(F->getCallingConv());
369
370
return Result;
371
}
372
373
/// Emit a call to llvm.memmove.element.unordered.atomic. Both pointer
/// alignments must be at least \p ElementSize. Attaches any requested
/// aliasing metadata.
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  // The intrinsic is overloaded on the two pointer types and the size type.
  Module *Mod = BB->getParent()->getParent();
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Function *MemMoveFn = Intrinsic::getDeclaration(
      Mod, Intrinsic::memmove_element_unordered_atomic, Tys);

  Value *Args[] = {Dst, Src, Size, getInt32(ElementSize)};
  CallInst *Call = CreateCall(MemMoveFn, Args);

  // Record alignment of both pointer args via parameter attributes.
  Call->addParamAttr(
      0, Attribute::getWithAlignment(Call->getContext(), DstAlign));
  Call->addParamAttr(
      1, Attribute::getWithAlignment(Call->getContext(), SrcAlign));

  // Attach any requested aliasing metadata.
  if (TBAATag)
    Call->setMetadata(LLVMContext::MD_tbaa, TBAATag);
  if (TBAAStructTag)
    Call->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
  if (ScopeTag)
    Call->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
  if (NoAliasTag)
    Call->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return Call;
}
/// Emit a unary vector-reduction intrinsic \p ID applied to \p Src; the
/// intrinsic is overloaded on the source vector type.
CallInst *IRBuilderBase::getReductionIntrinsic(Intrinsic::ID ID, Value *Src) {
  Module *Mod = GetInsertBlock()->getParent()->getParent();
  Function *Decl = Intrinsic::getDeclaration(Mod, ID, {Src->getType()});
  return CreateCall(Decl, {Src});
}

/// Emit llvm.vector.reduce.fadd(Acc, Src).
CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
  Module *Mod = GetInsertBlock()->getParent()->getParent();
  Function *Decl = Intrinsic::getDeclaration(
      Mod, Intrinsic::vector_reduce_fadd, {Src->getType()});
  return CreateCall(Decl, {Acc, Src});
}

/// Emit llvm.vector.reduce.fmul(Acc, Src).
CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
  Module *Mod = GetInsertBlock()->getParent()->getParent();
  Function *Decl = Intrinsic::getDeclaration(
      Mod, Intrinsic::vector_reduce_fmul, {Src->getType()});
  return CreateCall(Decl, {Acc, Src});
}
// Thin wrappers mapping each reduction kind onto getReductionIntrinsic.

CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_add, Src);
}

CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_mul, Src);
}

CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_and, Src);
}

CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_or, Src);
}

CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_xor, Src);
}

CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
  // Pick the signed or unsigned max reduction.
  Intrinsic::ID ID =
      IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
  return getReductionIntrinsic(ID, Src);
}

CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
  // Pick the signed or unsigned min reduction.
  Intrinsic::ID ID =
      IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
  return getReductionIntrinsic(ID, Src);
}

CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmax, Src);
}

CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmin, Src);
}

CallInst *IRBuilderBase::CreateFPMaximumReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmaximum, Src);
}

CallInst *IRBuilderBase::CreateFPMinimumReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fminimum, Src);
}
/// Emit llvm.lifetime.start for \p Ptr. A null \p Size means "unknown",
/// encoded as i64 -1.
CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.start only applies to pointers.");
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.start requires the size to be an i64");
  Module *Mod = BB->getParent()->getParent();
  Function *Fn = Intrinsic::getDeclaration(Mod, Intrinsic::lifetime_start,
                                           {Ptr->getType()});
  Value *Args[] = {Size, Ptr};
  return CreateCall(Fn, Args);
}

/// Emit llvm.lifetime.end for \p Ptr. A null \p Size means "unknown",
/// encoded as i64 -1.
CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.end only applies to pointers.");
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.end requires the size to be an i64");
  Module *Mod = BB->getParent()->getParent();
  Function *Fn = Intrinsic::getDeclaration(Mod, Intrinsic::lifetime_end,
                                           {Ptr->getType()});
  Value *Args[] = {Size, Ptr};
  return CreateCall(Fn, Args);
}
/// Emit llvm.invariant.start for \p Ptr. A null \p Size means "unknown",
/// encoded as i64 -1.
CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.start only applies to pointers.");
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "invariant.start requires the size to be an i64");

  // Fill in the single overloaded type: memory object type.
  Type *ObjectPtr[1] = {Ptr->getType()};
  Module *Mod = BB->getParent()->getParent();
  Function *Fn =
      Intrinsic::getDeclaration(Mod, Intrinsic::invariant_start, ObjectPtr);
  Value *Args[] = {Size, Ptr};
  return CreateCall(Fn, Args);
}
static MaybeAlign getAlign(Value *Ptr) {
532
if (auto *O = dyn_cast<GlobalObject>(Ptr))
533
return O->getAlign();
534
if (auto *A = dyn_cast<GlobalAlias>(Ptr))
535
return A->getAliaseeObject()->getAlign();
536
return {};
537
}
538
539
CallInst *IRBuilderBase::CreateThreadLocalAddress(Value *Ptr) {
540
assert(isa<GlobalValue>(Ptr) && cast<GlobalValue>(Ptr)->isThreadLocal() &&
541
"threadlocal_address only applies to thread local variables.");
542
CallInst *CI = CreateIntrinsic(llvm::Intrinsic::threadlocal_address,
543
{Ptr->getType()}, {Ptr});
544
if (MaybeAlign A = getAlign(Ptr)) {
545
CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), *A));
546
CI->addRetAttr(Attribute::getWithAlignment(CI->getContext(), *A));
547
}
548
return CI;
549
}
550
551
CallInst *
552
IRBuilderBase::CreateAssumption(Value *Cond,
553
ArrayRef<OperandBundleDef> OpBundles) {
554
assert(Cond->getType() == getInt1Ty() &&
555
"an assumption condition must be of type i1");
556
557
Value *Ops[] = { Cond };
558
Module *M = BB->getParent()->getParent();
559
Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
560
return CreateCall(FnAssume, Ops, OpBundles);
561
}
562
563
Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
564
Module *M = BB->getModule();
565
auto *FnIntrinsic = Intrinsic::getDeclaration(
566
M, Intrinsic::experimental_noalias_scope_decl, {});
567
return CreateCall(FnIntrinsic, {Scope});
568
}
569
570
/// Create a call to a Masked Load intrinsic.
571
/// \p Ty - vector type to load
572
/// \p Ptr - base pointer for the load
573
/// \p Alignment - alignment of the source location
574
/// \p Mask - vector of booleans which indicates what vector lanes should
575
/// be accessed in memory
576
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
577
/// of the result
578
/// \p Name - name of the result variable
579
CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment,
580
Value *Mask, Value *PassThru,
581
const Twine &Name) {
582
auto *PtrTy = cast<PointerType>(Ptr->getType());
583
assert(Ty->isVectorTy() && "Type should be vector");
584
assert(Mask && "Mask should not be all-ones (null)");
585
if (!PassThru)
586
PassThru = PoisonValue::get(Ty);
587
Type *OverloadedTypes[] = { Ty, PtrTy };
588
Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
589
return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
590
OverloadedTypes, Name);
591
}
592
593
/// Create a call to a Masked Store intrinsic.
594
/// \p Val - data to be stored,
595
/// \p Ptr - base pointer for the store
596
/// \p Alignment - alignment of the destination location
597
/// \p Mask - vector of booleans which indicates what vector lanes should
598
/// be accessed in memory
599
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
600
Align Alignment, Value *Mask) {
601
auto *PtrTy = cast<PointerType>(Ptr->getType());
602
Type *DataTy = Val->getType();
603
assert(DataTy->isVectorTy() && "Val should be a vector");
604
assert(Mask && "Mask should not be all-ones (null)");
605
Type *OverloadedTypes[] = { DataTy, PtrTy };
606
Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
607
return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
608
}
609
610
/// Create a call to a Masked intrinsic, with given intrinsic Id,
611
/// an array of operands - Ops, and an array of overloaded types -
612
/// OverloadedTypes.
613
CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
614
ArrayRef<Value *> Ops,
615
ArrayRef<Type *> OverloadedTypes,
616
const Twine &Name) {
617
Module *M = BB->getParent()->getParent();
618
Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
619
return CreateCall(TheFn, Ops, {}, Name);
620
}
621
622
/// Create a call to a Masked Gather intrinsic.
623
/// \p Ty - vector type to gather
624
/// \p Ptrs - vector of pointers for loading
625
/// \p Align - alignment for one element
626
/// \p Mask - vector of booleans which indicates what vector lanes should
627
/// be accessed in memory
628
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
629
/// of the result
630
/// \p Name - name of the result variable
631
CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs,
632
Align Alignment, Value *Mask,
633
Value *PassThru,
634
const Twine &Name) {
635
auto *VecTy = cast<VectorType>(Ty);
636
ElementCount NumElts = VecTy->getElementCount();
637
auto *PtrsTy = cast<VectorType>(Ptrs->getType());
638
assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");
639
640
if (!Mask)
641
Mask = getAllOnesMask(NumElts);
642
643
if (!PassThru)
644
PassThru = PoisonValue::get(Ty);
645
646
Type *OverloadedTypes[] = {Ty, PtrsTy};
647
Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};
648
649
// We specify only one type when we create this intrinsic. Types of other
650
// arguments are derived from this type.
651
return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
652
Name);
653
}
654
655
/// Create a call to a Masked Scatter intrinsic.
656
/// \p Data - data to be stored,
657
/// \p Ptrs - the vector of pointers, where the \p Data elements should be
658
/// stored
659
/// \p Align - alignment for one element
660
/// \p Mask - vector of booleans which indicates what vector lanes should
661
/// be accessed in memory
662
CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
663
Align Alignment, Value *Mask) {
664
auto *PtrsTy = cast<VectorType>(Ptrs->getType());
665
auto *DataTy = cast<VectorType>(Data->getType());
666
ElementCount NumElts = PtrsTy->getElementCount();
667
668
if (!Mask)
669
Mask = getAllOnesMask(NumElts);
670
671
Type *OverloadedTypes[] = {DataTy, PtrsTy};
672
Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};
673
674
// We specify only one type when we create this intrinsic. Types of other
675
// arguments are derived from this type.
676
return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
677
}
678
679
/// Create a call to Masked Expand Load intrinsic
680
/// \p Ty - vector type to load
681
/// \p Ptr - base pointer for the load
682
/// \p Mask - vector of booleans which indicates what vector lanes should
683
/// be accessed in memory
684
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
685
/// of the result
686
/// \p Name - name of the result variable
687
CallInst *IRBuilderBase::CreateMaskedExpandLoad(Type *Ty, Value *Ptr,
688
Value *Mask, Value *PassThru,
689
const Twine &Name) {
690
assert(Ty->isVectorTy() && "Type should be vector");
691
assert(Mask && "Mask should not be all-ones (null)");
692
if (!PassThru)
693
PassThru = PoisonValue::get(Ty);
694
Type *OverloadedTypes[] = {Ty};
695
Value *Ops[] = {Ptr, Mask, PassThru};
696
return CreateMaskedIntrinsic(Intrinsic::masked_expandload, Ops,
697
OverloadedTypes, Name);
698
}
699
700
/// Create a call to Masked Compress Store intrinsic
701
/// \p Val - data to be stored,
702
/// \p Ptr - base pointer for the store
703
/// \p Mask - vector of booleans which indicates what vector lanes should
704
/// be accessed in memory
705
CallInst *IRBuilderBase::CreateMaskedCompressStore(Value *Val, Value *Ptr,
706
Value *Mask) {
707
Type *DataTy = Val->getType();
708
assert(DataTy->isVectorTy() && "Val should be a vector");
709
assert(Mask && "Mask should not be all-ones (null)");
710
Type *OverloadedTypes[] = {DataTy};
711
Value *Ops[] = {Val, Ptr, Mask};
712
return CreateMaskedIntrinsic(Intrinsic::masked_compressstore, Ops,
713
OverloadedTypes);
714
}
715
716
template <typename T0>
717
static std::vector<Value *>
718
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
719
Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
720
std::vector<Value *> Args;
721
Args.push_back(B.getInt64(ID));
722
Args.push_back(B.getInt32(NumPatchBytes));
723
Args.push_back(ActualCallee);
724
Args.push_back(B.getInt32(CallArgs.size()));
725
Args.push_back(B.getInt32(Flags));
726
llvm::append_range(Args, CallArgs);
727
// GC Transition and Deopt args are now always handled via operand bundle.
728
// They will be removed from the signature of gc.statepoint shortly.
729
Args.push_back(B.getInt32(0));
730
Args.push_back(B.getInt32(0));
731
// GC args are now encoded in the gc-live operand bundle
732
return Args;
733
}
734
735
template<typename T1, typename T2, typename T3>
736
static std::vector<OperandBundleDef>
737
getStatepointBundles(std::optional<ArrayRef<T1>> TransitionArgs,
738
std::optional<ArrayRef<T2>> DeoptArgs,
739
ArrayRef<T3> GCArgs) {
740
std::vector<OperandBundleDef> Rval;
741
if (DeoptArgs) {
742
SmallVector<Value*, 16> DeoptValues;
743
llvm::append_range(DeoptValues, *DeoptArgs);
744
Rval.emplace_back("deopt", DeoptValues);
745
}
746
if (TransitionArgs) {
747
SmallVector<Value*, 16> TransitionValues;
748
llvm::append_range(TransitionValues, *TransitionArgs);
749
Rval.emplace_back("gc-transition", TransitionValues);
750
}
751
if (GCArgs.size()) {
752
SmallVector<Value*, 16> LiveValues;
753
llvm::append_range(LiveValues, GCArgs);
754
Rval.emplace_back("gc-live", LiveValues);
755
}
756
return Rval;
757
}
758
759
template <typename T0, typename T1, typename T2, typename T3>
760
static CallInst *CreateGCStatepointCallCommon(
761
IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
762
FunctionCallee ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
763
std::optional<ArrayRef<T1>> TransitionArgs,
764
std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
765
const Twine &Name) {
766
Module *M = Builder->GetInsertBlock()->getParent()->getParent();
767
// Fill in the one generic type'd argument (the function is also vararg)
768
Function *FnStatepoint =
769
Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
770
{ActualCallee.getCallee()->getType()});
771
772
std::vector<Value *> Args = getStatepointArgs(
773
*Builder, ID, NumPatchBytes, ActualCallee.getCallee(), Flags, CallArgs);
774
775
CallInst *CI = Builder->CreateCall(
776
FnStatepoint, Args,
777
getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
778
CI->addParamAttr(2,
779
Attribute::get(Builder->getContext(), Attribute::ElementType,
780
ActualCallee.getFunctionType()));
781
return CI;
782
}
783
784
CallInst *IRBuilderBase::CreateGCStatepointCall(
785
uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
786
ArrayRef<Value *> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
787
ArrayRef<Value *> GCArgs, const Twine &Name) {
788
return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
789
this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
790
CallArgs, std::nullopt /* No Transition Args */, DeoptArgs, GCArgs, Name);
791
}
792
793
CallInst *IRBuilderBase::CreateGCStatepointCall(
794
uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
795
uint32_t Flags, ArrayRef<Value *> CallArgs,
796
std::optional<ArrayRef<Use>> TransitionArgs,
797
std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
798
const Twine &Name) {
799
return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
800
this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
801
DeoptArgs, GCArgs, Name);
802
}
803
804
CallInst *IRBuilderBase::CreateGCStatepointCall(
805
uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
806
ArrayRef<Use> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
807
ArrayRef<Value *> GCArgs, const Twine &Name) {
808
return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
809
this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
810
CallArgs, std::nullopt, DeoptArgs, GCArgs, Name);
811
}
812
813
/// Shared implementation behind all CreateGCStatepointInvoke overloads: wraps
/// ActualInvokee in an llvm.experimental.gc.statepoint invoke, attaching the
/// transition/deopt/gc operand bundles built by getStatepointBundles.
template <typename T0, typename T1, typename T2, typename T3>
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualInvokee, BasicBlock *NormalDest,
    BasicBlock *UnwindDest, uint32_t Flags, ArrayRef<T0> InvokeArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                {ActualInvokee.getCallee()->getType()});

  // Prefix (id, num-patch-bytes, callee, #args, flags) + the actual call args.
  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee.getCallee(),
                        Flags, InvokeArgs);

  InvokeInst *II = Builder->CreateInvoke(
      FnStatepoint, NormalDest, UnwindDest, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  // Record the callee's function type on the statepoint's target operand
  // (parameter index 2) via an elementtype attribute.
  II->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualInvokee.getFunctionType()));
  return II;
}
839
840
// Convenience overload: no statepoint flags and no GC-transition arguments.
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  const uint32_t Flags = uint32_t(StatepointFlags::None);
  return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, /*TransitionArgs=*/std::nullopt, DeoptArgs, GCArgs, Name);
}
850
851
// Overload taking explicit statepoint flags, with transition and deopt
// arguments supplied as Use lists.
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}
861
862
// Convenience overload taking the invoke arguments as a Use list; no
// statepoint flags and no GC-transition arguments.
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
    std::optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  const uint32_t Flags = uint32_t(StatepointFlags::None);
  return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, /*TransitionArgs=*/std::nullopt, DeoptArgs, GCArgs, Name);
}
872
873
// Emit llvm.experimental.gc.result extracting the (typed) return value of a
// statepoint. The intrinsic is overloaded on its result type only.
CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
                                        Type *ResultType, const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Function *FnGCResult = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_result, {ResultType});
  return CreateCall(FnGCResult, {Statepoint}, {}, Name);
}
883
884
// Emit llvm.experimental.gc.relocate for a pointer tracked by a statepoint.
// The base and derived pointers are identified by their operand offsets on
// the statepoint; the intrinsic is overloaded on the relocated pointer type.
CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
                                          int BaseOffset, int DerivedOffset,
                                          Type *ResultType, const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Function *FnGCRelocate = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_relocate, {ResultType});
  return CreateCall(
      FnGCRelocate,
      {Statepoint, getInt32(BaseOffset), getInt32(DerivedOffset)}, {}, Name);
}
895
896
// Emit llvm.experimental.gc.get.pointer.base. The intrinsic is overloaded on
// both its result and its argument type; both are DerivedPtr's type.
CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
                                                const Twine &Name) {
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCFindBase = Intrinsic::getDeclaration(
      BB->getParent()->getParent(),
      Intrinsic::experimental_gc_get_pointer_base, {PtrTy, PtrTy});
  return CreateCall(FnGCFindBase, {DerivedPtr}, {}, Name);
}
904
905
// Emit llvm.experimental.gc.get.pointer.offset, overloaded on the derived
// pointer's type.
CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
                                                  const Twine &Name) {
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCGetOffset = Intrinsic::getDeclaration(
      BB->getParent()->getParent(),
      Intrinsic::experimental_gc_get_pointer_offset, {PtrTy});
  return CreateCall(FnGCGetOffset, {DerivedPtr}, {}, Name);
}
913
914
// Emit a call to a unary intrinsic overloaded on its operand type, copying
// fast-math flags from FMFSource when one is provided.
CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
                                              Instruction *FMFSource,
                                              const Twine &Name) {
  Function *Fn =
      Intrinsic::getDeclaration(BB->getModule(), ID, {V->getType()});
  return createCallHelper(Fn, {V}, Name, FMFSource);
}
921
922
// Emit a call to a binary intrinsic overloaded on the common type of its two
// operands, letting the folder collapse it to a constant when possible.
Value *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
                                            Value *RHS, Instruction *FMFSource,
                                            const Twine &Name) {
  Function *Fn =
      Intrinsic::getDeclaration(BB->getModule(), ID, {LHS->getType()});
  // Give the folder first crack at it.
  if (Value *Folded = Folder.FoldBinaryIntrinsic(ID, LHS, RHS,
                                                 Fn->getReturnType(), FMFSource))
    return Folded;
  return createCallHelper(Fn, {LHS, RHS}, Name, FMFSource);
}
932
933
// Emit a call to an intrinsic whose overload types are supplied explicitly by
// the caller.
CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
                                         ArrayRef<Type *> Types,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Function *Fn = Intrinsic::getDeclaration(BB->getModule(), ID, Types);
  return createCallHelper(Fn, Args, Name, FMFSource);
}
942
943
// Overload that infers the intrinsic's overload type list from the desired
// return type and the types of the supplied arguments, by matching the
// concrete signature against the intrinsic's IIT descriptor table.
CallInst *IRBuilderBase::CreateIntrinsic(Type *RetTy, Intrinsic::ID ID,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();

  SmallVector<Intrinsic::IITDescriptor> Table;
  Intrinsic::getIntrinsicInfoTableEntries(ID, Table);
  ArrayRef<Intrinsic::IITDescriptor> TableRef(Table);

  // Build the concrete function type from RetTy and the argument types, then
  // recover the overload types by matching it against the descriptors.
  SmallVector<Type *> ArgTys;
  ArgTys.reserve(Args.size());
  for (auto &I : Args)
    ArgTys.push_back(I->getType());
  FunctionType *FTy = FunctionType::get(RetTy, ArgTys, false);
  SmallVector<Type *> OverloadTys;
  Intrinsic::MatchIntrinsicTypesResult Res =
      matchIntrinsicSignature(FTy, TableRef, OverloadTys);
  (void)Res; // Only consumed by the assert below.
  assert(Res == Intrinsic::MatchIntrinsicTypes_Match && TableRef.empty() &&
         "Wrong types for intrinsic!");
  // TODO: Handle varargs intrinsics.

  Function *Fn = Intrinsic::getDeclaration(M, ID, OverloadTys);
  return createCallHelper(Fn, Args, Name, FMFSource);
}
969
970
// Emit a constrained-FP binary intrinsic: operands plus the rounding-mode and
// exception-behavior metadata arguments, marked with the strictfp call attr.
CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
    Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
    const Twine &Name, MDNode *FPMathTag,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *RoundingV = getConstrainedFPRounding(Rounding);
  Value *ExceptV = getConstrainedFPExcept(Except);

  // Take fast-math flags from FMFSource when given, else the builder's own.
  FastMathFlags UseFMF = FMFSource ? FMFSource->getFastMathFlags() : FMF;

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, RoundingV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}
988
989
// Emit a constrained-FP binary intrinsic that has no rounding-mode operand:
// operands plus only the exception-behavior metadata argument.
CallInst *IRBuilderBase::CreateConstrainedFPUnroundedBinOp(
    Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
    const Twine &Name, MDNode *FPMathTag,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  // Take fast-math flags from FMFSource when given, else the builder's own.
  FastMathFlags UseFMF = FMFSource ? FMFSource->getFastMathFlags() : FMF;

  CallInst *C =
      CreateIntrinsic(ID, {L->getType()}, {L, R, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}
1005
1006
// Dispatch an n-ary operation by opcode arity; Opc must name either a unary
// or a binary instruction (the two predicates are mutually exclusive).
Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                                   const Twine &Name, MDNode *FPMathTag) {
  if (Instruction::isUnaryOp(Opc)) {
    assert(Ops.size() == 1 && "Invalid number of operands!");
    return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc), Ops[0], Name,
                      FPMathTag);
  }
  if (Instruction::isBinaryOp(Opc)) {
    assert(Ops.size() == 2 && "Invalid number of operands!");
    return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc), Ops[0], Ops[1],
                       Name, FPMathTag);
  }
  llvm_unreachable("Unexpected opcode!");
}
1020
1021
// Emit a constrained-FP cast intrinsic. Some constrained casts carry a
// rounding-mode operand between the value and the exception-behavior operand;
// it is inserted only when the intrinsic requires one.
CallInst *IRBuilderBase::CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy,
    Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  // Take fast-math flags from FMFSource when given, else the builder's own.
  FastMathFlags UseFMF = FMFSource ? FMFSource->getFastMathFlags() : FMF;

  SmallVector<Value *, 3> Args{V};
  if (Intrinsic::hasConstrainedFPRoundingModeOperand(ID))
    Args.push_back(getConstrainedFPRounding(Rounding));
  Args.push_back(ExceptV);

  CallInst *C =
      CreateIntrinsic(ID, {DestTy, V->getType()}, Args, nullptr, Name);
  setConstrainedFPCallAttr(C);

  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}
1047
1048
// Common implementation for FP comparisons: in strict-FP mode routes through
// the constrained fcmp/fcmps intrinsics, otherwise folds if possible and
// emits a plain fcmp with the builder's fast-math flags.
Value *IRBuilderBase::CreateFCmpHelper(
    CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name,
    MDNode *FPMathTag, bool IsSignaling) {
  if (IsFPConstrained) {
    auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
                          : Intrinsic::experimental_constrained_fcmp;
    return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
  }

  if (auto *V = Folder.FoldCmp(P, LHS, RHS))
    return V;
  return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
}
1061
1062
// Emit a constrained-FP comparison intrinsic: LHS, RHS, the predicate encoded
// as metadata, and the exception-behavior operand.
CallInst *IRBuilderBase::CreateConstrainedFPCmp(
    Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
    const Twine &Name, std::optional<fp::ExceptionBehavior> Except) {
  Value *PredicateV = getConstrainedFPPredicate(P);
  Value *ExceptV = getConstrainedFPExcept(Except);

  CallInst *Cmp = CreateIntrinsic(ID, {L->getType()},
                                  {L, R, PredicateV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(Cmp);
  return Cmp;
}
1073
1074
// Emit a call to a constrained-FP intrinsic: copy the user arguments, append
// the rounding-mode operand when the intrinsic has one, then the
// exception-behavior operand, and mark the call strictfp.
CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs(Args.begin(), Args.end());

  if (Intrinsic::hasConstrainedFPRoundingModeOperand(Callee->getIntrinsicID()))
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}
1090
1091
// Create a select, folding when possible. When MDFrom is given, carry over
// its profile and unpredictability metadata onto the new select.
Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
                                   const Twine &Name, Instruction *MDFrom) {
  if (auto *Folded = Folder.FoldSelect(C, True, False))
    return Folded;

  SelectInst *Sel = SelectInst::Create(C, True, False);
  if (MDFrom) {
    MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
    MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
    Sel = addBranchMetadata(Sel, Prof, Unpred);
  }
  if (isa<FPMathOperator>(Sel))
    setFPAttrs(Sel, /*FPMathTag=*/nullptr, FMF);
  return Insert(Sel, Name);
}
1106
1107
// Compute (LHS - RHS) in units of ElemTy elements: subtract the raw addresses
// as i64 and divide (exactly) by the element size.
Value *IRBuilderBase::CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
                                    const Twine &Name) {
  assert(LHS->getType() == RHS->getType() &&
         "Pointer subtraction operand types must match!");
  Type *I64Ty = Type::getInt64Ty(Context);
  Value *LHSInt = CreatePtrToInt(LHS, I64Ty);
  Value *RHSInt = CreatePtrToInt(RHS, I64Ty);
  Value *Difference = CreateSub(LHSInt, RHSInt);
  return CreateExactSDiv(Difference, ConstantExpr::getSizeOf(ElemTy), Name);
}
1117
1118
// Emit llvm.launder.invariant.group on Ptr; the intrinsic takes and returns
// the same pointer type.
Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "launder.invariant.group only applies to pointers.");
  Type *PtrType = Ptr->getType();
  Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
      BB->getParent()->getParent(), Intrinsic::launder_invariant_group,
      {PtrType});

  assert(FnLaunderInvariantGroup->getReturnType() == PtrType &&
         FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
             PtrType &&
         "LaunderInvariantGroup should take and return the same type");

  return CreateCall(FnLaunderInvariantGroup, {Ptr});
}
1133
1134
// Emit llvm.strip.invariant.group on Ptr; the intrinsic takes and returns the
// same pointer type.
Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "strip.invariant.group only applies to pointers.");
  Type *PtrType = Ptr->getType();
  Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
      BB->getParent()->getParent(), Intrinsic::strip_invariant_group,
      {PtrType});

  assert(FnStripInvariantGroup->getReturnType() == PtrType &&
         FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
             PtrType &&
         "StripInvariantGroup should take and return the same type");

  return CreateCall(FnStripInvariantGroup, {Ptr});
}
1150
1151
// Reverse the elements of vector V. Scalable vectors have no static shuffle
// mask, so they go through the llvm.vector.reverse intrinsic; fixed-width
// vectors use a reversing shufflevector mask.
Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
  auto *Ty = cast<VectorType>(V->getType());
  if (isa<ScalableVectorType>(Ty)) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(M, Intrinsic::vector_reverse, Ty);
    return Insert(CallInst::Create(F, V), Name);
  }
  // Mask [N-1, N-2, ..., 0].
  int NumElts = Ty->getElementCount().getKnownMinValue();
  SmallVector<int, 8> ShuffleMask;
  for (int Idx = NumElts - 1; Idx >= 0; --Idx)
    ShuffleMask.push_back(Idx);
  return CreateShuffleVector(V, ShuffleMask, Name);
}
1165
1166
// Concatenate V1 and V2 and extract a vector of the same width starting at
// element index Imm (negative Imm counts back from the end of V1). Scalable
// vectors use the llvm.vector.splice intrinsic; fixed-width vectors lower to
// a shufflevector.
Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
                                         const Twine &Name) {
  assert(isa<VectorType>(V1->getType()) && "Unexpected type");
  assert(V1->getType() == V2->getType() &&
         "Splice expects matching operand types!");

  if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(M, Intrinsic::vector_splice, VTy);

    Value *Ops[] = {V1, V2, getInt32(Imm)};
    return Insert(CallInst::Create(F, Ops), Name);
  }

  unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
  // A valid splice index satisfies -NumElts <= Imm < NumElts. The previous
  // assert joined the two bounds with '||', which is vacuously true for every
  // Imm (the disjuncts cannot both be false), so invalid immediates were
  // never caught; '&&' expresses the intended range check.
  assert(((-Imm <= NumElts) && (Imm < NumElts)) &&
         "Invalid immediate for vector splice!");

  // Keep the original behaviour for fixed vector
  unsigned Idx = (NumElts + Imm) % NumElts;
  SmallVector<int, 8> Mask;
  for (unsigned I = 0; I < NumElts; ++I)
    Mask.push_back(Idx + I);

  return CreateShuffleVector(V1, V2, Mask);
}
1192
1193
// Splat V across NumElts fixed lanes; delegates to the ElementCount overload.
Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
                                        const Twine &Name) {
  return CreateVectorSplat(ElementCount::getFixed(NumElts), V, Name);
}
1198
1199
// Splat scalar V across a vector of EC elements: insert into lane 0 of a
// poison vector, then broadcast lane 0 everywhere with an all-zeros mask.
Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
                                        const Twine &Name) {
  assert(EC.isNonZero() && "Cannot splat to an empty vector!");

  Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
  V = CreateInsertElement(Poison, V, getInt64(0), Name + ".splatinsert");

  SmallVector<int, 16> Zeros(EC.getKnownMinValue(), 0);
  return CreateShuffleVector(V, Zeros, Name + ".splat");
}
1212
1213
// Emit llvm.preserve.array.access.index for Base[0]...[0][LastIndex] with
// Dimension leading zero indices (used by the preserve-access-index
// machinery, e.g. for CO-RE style relocations — see MD_preserve_access_index).
Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
    Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.array.access.index.");

  // The result type is what a GEP with Dimension zeros plus LastIndex would
  // produce.
  Value *LastIndexV = getInt32(LastIndex);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  SmallVector<Value *, 4> IdxList(Dimension, Zero);
  IdxList.push_back(LastIndexV);

  Type *ResultType = GetElementPtrInst::getGEPReturnType(Base, IdxList);

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});

  Value *DimV = getInt32(Dimension);
  CallInst *Fn =
      CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
  // Record the pointee type on the base operand via elementtype, and attach
  // the caller-provided access-index metadata when present.
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}
1241
1242
// Emit llvm.preserve.union.access.index for field FieldIndex of the union
// pointed to by Base; the intrinsic takes and returns the base pointer type.
Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
    Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.union.access.index.");
  auto *BaseType = Base->getType();

  Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
      BB->getParent()->getParent(), Intrinsic::preserve_union_access_index,
      {BaseType, BaseType});

  CallInst *Fn =
      CreateCall(FnPreserveUnionAccessIndex, {Base, getInt32(FieldIndex)});
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}
1260
1261
// Emit llvm.preserve.struct.access.index for member Index (GEP index) /
// FieldIndex (debug-info field number) of the struct pointed to by Base.
Value *IRBuilderBase::CreatePreserveStructAccessIndex(
    Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.struct.access.index.");

  // The result type is what a GEP {0, Index} on Base would produce.
  Value *GEPIndex = getInt32(Index);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(Base, {Zero, GEPIndex});

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
                            {Base, GEPIndex, DIIndex});
  // Record the pointee type on the base operand via elementtype, and attach
  // the caller-provided access-index metadata when present.
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}
1287
1288
// Emit llvm.is.fpclass testing FPNum against the class bitmask in Test.
Value *IRBuilderBase::createIsFPClass(Value *FPNum, unsigned Test) {
  Function *FnIsFPClass = Intrinsic::getDeclaration(
      BB->getParent()->getParent(), Intrinsic::is_fpclass, {FPNum->getType()});
  return CreateCall(FnIsFPClass, {FPNum, getInt32(Test)});
}
1295
1296
// Encode an alignment assumption as an "align" operand bundle on an
// llvm.assume of 'true': align(Ptr, Align[, Offset]).
CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                                         Value *PtrValue,
                                                         Value *AlignValue,
                                                         Value *OffsetValue) {
  SmallVector<Value *, 4> Vals{PtrValue, AlignValue};
  if (OffsetValue)
    Vals.push_back(OffsetValue);
  OperandBundleDefT<Value *> AlignOpB("align", Vals);
  return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
}
1306
1307
// Overload taking a constant alignment; materializes it as an intptr-sized
// integer and forwards to the helper.
CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   unsigned Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  assert(Alignment != 0 && "Invalid Alignment");
  auto *PtrTy = cast<PointerType>(PtrValue->getType());
  Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
  return CreateAlignmentAssumptionHelper(
      DL, PtrValue, ConstantInt::get(IntPtrTy, Alignment), OffsetValue);
}
1319
1320
// Overload taking the alignment as a runtime Value; checks the pointer
// operand and forwards directly to the helper.
CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   Value *Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
}
1328
1329
// Out-of-line virtual destructors and anchor functions — presumably here to
// pin these classes' vtables to this translation unit (the usual LLVM
// anchoring idiom); they have no runtime behavior of their own.
IRBuilderDefaultInserter::~IRBuilderDefaultInserter() = default;
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() = default;
IRBuilderFolder::~IRBuilderFolder() = default;
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}
1334
1335