GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/llvm/lib/Analysis/IVDescriptors.cpp
1
//===- llvm/Analysis/IVDescriptors.cpp - IndVar Descriptors -----*- C++ -*-===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This file "describes" induction and recurrence variables.
10
//
11
//===----------------------------------------------------------------------===//
12
13
#include "llvm/Analysis/IVDescriptors.h"
14
#include "llvm/Analysis/DemandedBits.h"
15
#include "llvm/Analysis/LoopInfo.h"
16
#include "llvm/Analysis/ScalarEvolution.h"
17
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
18
#include "llvm/Analysis/ValueTracking.h"
19
#include "llvm/IR/Dominators.h"
20
#include "llvm/IR/Instructions.h"
21
#include "llvm/IR/Module.h"
22
#include "llvm/IR/PatternMatch.h"
23
#include "llvm/IR/ValueHandle.h"
24
#include "llvm/Support/Debug.h"
25
#include "llvm/Support/KnownBits.h"
26
27
using namespace llvm;
28
using namespace llvm::PatternMatch;
29
30
#define DEBUG_TYPE "iv-descriptors"
31
32
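// Returns true if every operand of I is an instruction that is already
// contained in Set.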
bool RecurrenceDescriptor::areAllUsesIn(Instruction *I,
33
SmallPtrSetImpl<Instruction *> &Set) {
34
for (const Use &Use : I->operands())
35
if (!Set.count(dyn_cast<Instruction>(Use)))
36
return false;
37
return true;
38
}
39
40
bool RecurrenceDescriptor::isIntegerRecurrenceKind(RecurKind Kind) {
41
switch (Kind) {
42
default:
43
break;
44
case RecurKind::Add:
45
case RecurKind::Mul:
46
case RecurKind::Or:
47
case RecurKind::And:
48
case RecurKind::Xor:
49
case RecurKind::SMax:
50
case RecurKind::SMin:
51
case RecurKind::UMax:
52
case RecurKind::UMin:
53
case RecurKind::IAnyOf:
54
case RecurKind::FAnyOf:
55
return true;
56
}
57
return false;
58
}
59
60
bool RecurrenceDescriptor::isFloatingPointRecurrenceKind(RecurKind Kind) {
61
return (Kind != RecurKind::None) && !isIntegerRecurrenceKind(Kind);
62
}
63
64
/// Determines if Phi may have been type-promoted. If Phi has a single user
65
/// that ANDs the Phi with a type mask, return the user. RT is updated to
66
/// account for the narrower bit width represented by the mask, and the AND
67
/// instruction is added to CI.
68
static Instruction *lookThroughAnd(PHINode *Phi, Type *&RT,
69
SmallPtrSetImpl<Instruction *> &Visited,
70
SmallPtrSetImpl<Instruction *> &CI) {
71
if (!Phi->hasOneUse())
72
return Phi;
73
74
const APInt *M = nullptr;
75
Instruction *I, *J = cast<Instruction>(Phi->use_begin()->getUser());
76
77
// Matches either I & 2^x-1 or 2^x-1 & I. If we find a match, we update RT
78
// with a new integer type of the corresponding bit width.
79
if (match(J, m_And(m_Instruction(I), m_APInt(M)))) {
80
int32_t Bits = (*M + 1).exactLogBase2();
81
if (Bits > 0) {
82
RT = IntegerType::get(Phi->getContext(), Bits);
83
Visited.insert(Phi);
84
CI.insert(J);
85
return J;
86
}
87
}
88
return Phi;
89
}
90
91
/// Compute the minimal bit width needed to represent a reduction whose exit
92
/// instruction is given by Exit.
93
static std::pair<Type *, bool> computeRecurrenceType(Instruction *Exit,
94
DemandedBits *DB,
95
AssumptionCache *AC,
96
DominatorTree *DT) {
97
bool IsSigned = false;
98
const DataLayout &DL = Exit->getDataLayout();
99
uint64_t MaxBitWidth = DL.getTypeSizeInBits(Exit->getType());
100
101
if (DB) {
102
// Use the demanded bits analysis to determine the bits that are live out
103
// of the exit instruction, rounding up to the nearest power of two. If the
104
// use of demanded bits results in a smaller bit width, we know the value
105
// must be positive (i.e., IsSigned = false), because if this were not the
106
// case, the sign bit would have been demanded.
107
auto Mask = DB->getDemandedBits(Exit);
108
MaxBitWidth = Mask.getBitWidth() - Mask.countl_zero();
109
}
110
111
if (MaxBitWidth == DL.getTypeSizeInBits(Exit->getType()) && AC && DT) {
112
// If demanded bits wasn't able to limit the bit width, we can try to use
113
// value tracking instead. This can be the case, for example, if the value
114
// may be negative.
115
auto NumSignBits = ComputeNumSignBits(Exit, DL, 0, AC, nullptr, DT);
116
auto NumTypeBits = DL.getTypeSizeInBits(Exit->getType());
117
MaxBitWidth = NumTypeBits - NumSignBits;
118
KnownBits Bits = computeKnownBits(Exit, DL);
119
if (!Bits.isNonNegative()) {
120
// If the value is not known to be non-negative, we set IsSigned to true,
121
// meaning that we will use sext instructions instead of zext
122
// instructions to restore the original type.
123
IsSigned = true;
124
// Make sure at least one sign bit is included in the result, so it
125
// will get properly sign-extended.
126
++MaxBitWidth;
127
}
128
}
129
MaxBitWidth = llvm::bit_ceil(MaxBitWidth);
130
131
return std::make_pair(Type::getIntNTy(Exit->getContext(), MaxBitWidth),
132
IsSigned);
133
}
134
135
/// Collect cast instructions that can be ignored in the vectorizer's cost
136
/// model, given a reduction exit value and the minimal type in which the
137
/// reduction can be represented. Also search casts to the recurrence type
138
/// to find the minimum width used by the recurrence.
139
static void collectCastInstrs(Loop *TheLoop, Instruction *Exit,
140
Type *RecurrenceType,
141
SmallPtrSetImpl<Instruction *> &Casts,
142
unsigned &MinWidthCastToRecurTy) {
143
144
SmallVector<Instruction *, 8> Worklist;
145
SmallPtrSet<Instruction *, 8> Visited;
146
Worklist.push_back(Exit);
147
MinWidthCastToRecurTy = -1U;
148
149
while (!Worklist.empty()) {
150
Instruction *Val = Worklist.pop_back_val();
151
Visited.insert(Val);
152
if (auto *Cast = dyn_cast<CastInst>(Val)) {
153
if (Cast->getSrcTy() == RecurrenceType) {
154
// If the source type of a cast instruction is equal to the recurrence
155
// type, it will be eliminated, and should be ignored in the vectorizer
156
// cost model.
157
Casts.insert(Cast);
158
continue;
159
}
160
if (Cast->getDestTy() == RecurrenceType) {
161
// The minimum width used by the recurrence is found by checking for
162
// casts on its operands. The minimum width is used by the vectorizer
163
// when finding the widest type for in-loop reductions without any
164
// loads/stores.
165
MinWidthCastToRecurTy = std::min<unsigned>(
166
MinWidthCastToRecurTy, Cast->getSrcTy()->getScalarSizeInBits());
167
continue;
168
}
169
}
170
// Add all operands to the work list if they are loop-varying values that
171
// we haven't yet visited.
172
for (Value *O : cast<User>(Val)->operands())
173
if (auto *I = dyn_cast<Instruction>(O))
174
if (TheLoop->contains(I) && !Visited.count(I))
175
Worklist.push_back(I);
176
}
177
}
178
179
// Check if a given Phi node can be recognized as an ordered reduction for
180
// vectorizing floating point operations without unsafe math.
181
static bool checkOrderedReduction(RecurKind Kind, Instruction *ExactFPMathInst,
182
Instruction *Exit, PHINode *Phi) {
183
// Currently only FAdd and FMulAdd are supported.
184
if (Kind != RecurKind::FAdd && Kind != RecurKind::FMulAdd)
185
return false;
186
187
if (Kind == RecurKind::FAdd && Exit->getOpcode() != Instruction::FAdd)
188
return false;
189
190
if (Kind == RecurKind::FMulAdd &&
191
!RecurrenceDescriptor::isFMulAddIntrinsic(Exit))
192
return false;
193
194
// Ensure the exit instruction has only one user other than the reduction PHI
195
if (Exit != ExactFPMathInst || Exit->hasNUsesOrMore(3))
196
return false;
197
198
// The only pattern accepted is the one in which the reduction PHI
199
// is used as one of the operands of the exit instruction
200
auto *Op0 = Exit->getOperand(0);
201
auto *Op1 = Exit->getOperand(1);
202
if (Kind == RecurKind::FAdd && Op0 != Phi && Op1 != Phi)
203
return false;
204
if (Kind == RecurKind::FMulAdd && Exit->getOperand(2) != Phi)
205
return false;
206
207
LLVM_DEBUG(dbgs() << "LV: Found an ordered reduction: Phi: " << *Phi
208
<< ", ExitInst: " << *Exit << "\n");
209
210
return true;
211
}
212
213
bool RecurrenceDescriptor::AddReductionVar(
214
PHINode *Phi, RecurKind Kind, Loop *TheLoop, FastMathFlags FuncFMF,
215
RecurrenceDescriptor &RedDes, DemandedBits *DB, AssumptionCache *AC,
216
DominatorTree *DT, ScalarEvolution *SE) {
217
if (Phi->getNumIncomingValues() != 2)
218
return false;
219
220
// Reduction variables are only found in the loop header block.
221
if (Phi->getParent() != TheLoop->getHeader())
222
return false;
223
224
// Obtain the reduction start value from the value that comes from the loop
225
// preheader.
226
Value *RdxStart = Phi->getIncomingValueForBlock(TheLoop->getLoopPreheader());
227
228
// ExitInstruction is the single value which is used outside the loop.
229
// We only allow for a single reduction value to be used outside the loop.
230
// This includes users of the reduction variables (which form a cycle
231
// which ends in the phi node).
232
Instruction *ExitInstruction = nullptr;
233
234
// Variable to keep last visited store instruction. By the end of the
235
// algorithm this variable will either be null or hold the intermediate
236
// reduction value stored to an invariant address.
237
StoreInst *IntermediateStore = nullptr;
238
239
// Indicates that we found a reduction operation in our scan.
240
bool FoundReduxOp = false;
241
242
// We start with the PHI node and scan for all of the users of this
243
// instruction. All users must be instructions that can be used as reduction
244
// variables (such as ADD). We must have a single out-of-block user. The cycle
245
// must include the original PHI.
246
bool FoundStartPHI = false;
247
248
// To recognize min/max patterns formed by an icmp/select sequence, we store
249
// the number of instructions we saw from the recognized min/max pattern,
250
// to make sure we only see exactly the two instructions.
251
unsigned NumCmpSelectPatternInst = 0;
252
InstDesc ReduxDesc(false, nullptr);
253
254
// Data used for determining if the recurrence has been type-promoted.
255
Type *RecurrenceType = Phi->getType();
256
SmallPtrSet<Instruction *, 4> CastInsts;
257
unsigned MinWidthCastToRecurrenceType;
258
Instruction *Start = Phi;
259
bool IsSigned = false;
260
261
SmallPtrSet<Instruction *, 8> VisitedInsts;
262
SmallVector<Instruction *, 8> Worklist;
263
264
// Return early if the recurrence kind does not match the type of Phi. If the
265
// recurrence kind is arithmetic, we attempt to look through AND operations
266
// resulting from the type promotion performed by InstCombine. Vector
267
// operations are not limited to the legal integer widths, so we may be able
268
// to evaluate the reduction in the narrower width.
269
if (RecurrenceType->isFloatingPointTy()) {
270
if (!isFloatingPointRecurrenceKind(Kind))
271
return false;
272
} else if (RecurrenceType->isIntegerTy()) {
273
if (!isIntegerRecurrenceKind(Kind))
274
return false;
275
if (!isMinMaxRecurrenceKind(Kind))
276
Start = lookThroughAnd(Phi, RecurrenceType, VisitedInsts, CastInsts);
277
} else {
278
// Pointer min/max may exist, but it is not supported as a reduction op.
279
return false;
280
}
281
282
Worklist.push_back(Start);
283
VisitedInsts.insert(Start);
284
285
// Start with all flags set because we will intersect this with the reduction
286
// flags from all the reduction operations.
287
FastMathFlags FMF = FastMathFlags::getFast();
288
289
// The first instruction in the use-def chain of the Phi node that requires
290
// exact floating point operations.
291
Instruction *ExactFPMathInst = nullptr;
292
293
// A value in the reduction can be used:
294
// - By the reduction:
295
// - Reduction operation:
296
// - One use of reduction value (safe).
297
// - Multiple use of reduction value (not safe).
298
// - PHI:
299
// - All uses of the PHI must be the reduction (safe).
300
// - Otherwise, not safe.
301
// - By instructions outside of the loop (safe).
302
// * One value may have several outside users, but all outside
303
// uses must be of the same value.
304
// - By store instructions with a loop invariant address (safe with
305
// the following restrictions):
306
// * If there are several stores, all must have the same address.
307
// * Final value should be stored in that loop invariant address.
308
// - By an instruction that is not part of the reduction (not safe).
309
// This is either:
310
// * An instruction type other than PHI or the reduction operation.
311
// * A PHI in the header other than the initial PHI.
312
while (!Worklist.empty()) {
313
Instruction *Cur = Worklist.pop_back_val();
314
315
// Store instructions are allowed iff it is the store of the reduction
316
// value to the same loop invariant memory location.
317
if (auto *SI = dyn_cast<StoreInst>(Cur)) {
318
if (!SE) {
319
LLVM_DEBUG(dbgs() << "Store instructions are not processed without "
320
<< "Scalar Evolution Analysis\n");
321
return false;
322
}
323
324
const SCEV *PtrScev = SE->getSCEV(SI->getPointerOperand());
325
// Check it is the same address as previous stores
326
if (IntermediateStore) {
327
const SCEV *OtherScev =
328
SE->getSCEV(IntermediateStore->getPointerOperand());
329
330
if (OtherScev != PtrScev) {
331
LLVM_DEBUG(dbgs() << "Storing reduction value to different addresses "
332
<< "inside the loop: " << *SI->getPointerOperand()
333
<< " and "
334
<< *IntermediateStore->getPointerOperand() << '\n');
335
return false;
336
}
337
}
338
339
// Check the pointer is loop invariant
340
if (!SE->isLoopInvariant(PtrScev, TheLoop)) {
341
LLVM_DEBUG(dbgs() << "Storing reduction value to non-uniform address "
342
<< "inside the loop: " << *SI->getPointerOperand()
343
<< '\n');
344
return false;
345
}
346
347
// IntermediateStore is always the last store in the loop.
348
IntermediateStore = SI;
349
continue;
350
}
351
352
// No Users.
353
// If the instruction has no users then this is a broken chain and can't be
354
// a reduction variable.
355
if (Cur->use_empty())
356
return false;
357
358
bool IsAPhi = isa<PHINode>(Cur);
359
360
// A header PHI use other than the original PHI.
361
if (Cur != Phi && IsAPhi && Cur->getParent() == Phi->getParent())
362
return false;
363
364
// Reductions of instructions such as Div and Sub are only possible if the
365
// LHS is the reduction variable.
366
if (!Cur->isCommutative() && !IsAPhi && !isa<SelectInst>(Cur) &&
367
!isa<ICmpInst>(Cur) && !isa<FCmpInst>(Cur) &&
368
!VisitedInsts.count(dyn_cast<Instruction>(Cur->getOperand(0))))
369
return false;
370
371
// Any reduction instruction must be of one of the allowed kinds. We ignore
372
// the starting value (the Phi or an AND instruction if the Phi has been
373
// type-promoted).
374
if (Cur != Start) {
375
ReduxDesc =
376
isRecurrenceInstr(TheLoop, Phi, Cur, Kind, ReduxDesc, FuncFMF);
377
ExactFPMathInst = ExactFPMathInst == nullptr
378
? ReduxDesc.getExactFPMathInst()
379
: ExactFPMathInst;
380
if (!ReduxDesc.isRecurrence())
381
return false;
382
// FIXME: FMF is allowed on phi, but propagation is not handled correctly.
383
if (isa<FPMathOperator>(ReduxDesc.getPatternInst()) && !IsAPhi) {
384
FastMathFlags CurFMF = ReduxDesc.getPatternInst()->getFastMathFlags();
385
if (auto *Sel = dyn_cast<SelectInst>(ReduxDesc.getPatternInst())) {
386
// Accept FMF on either fcmp or select of a min/max idiom.
387
// TODO: This is a hack to work-around the fact that FMF may not be
388
// assigned/propagated correctly. If that problem is fixed or we
389
// standardize on fmin/fmax via intrinsics, this can be removed.
390
if (auto *FCmp = dyn_cast<FCmpInst>(Sel->getCondition()))
391
CurFMF |= FCmp->getFastMathFlags();
392
}
393
FMF &= CurFMF;
394
}
395
// Update this reduction kind if we matched a new instruction.
396
// TODO: Can we eliminate the need for a 2nd InstDesc by keeping 'Kind'
397
// state accurate while processing the worklist?
398
if (ReduxDesc.getRecKind() != RecurKind::None)
399
Kind = ReduxDesc.getRecKind();
400
}
401
402
bool IsASelect = isa<SelectInst>(Cur);
403
404
// A conditional reduction operation must have no more than 2 uses in
405
// VisitedInsts.
406
if (IsASelect && (Kind == RecurKind::FAdd || Kind == RecurKind::FMul) &&
407
hasMultipleUsesOf(Cur, VisitedInsts, 2))
408
return false;
409
410
// A reduction operation must only have one use of the reduction value.
411
if (!IsAPhi && !IsASelect && !isMinMaxRecurrenceKind(Kind) &&
412
!isAnyOfRecurrenceKind(Kind) && hasMultipleUsesOf(Cur, VisitedInsts, 1))
413
return false;
414
415
// All inputs to a PHI node must be a reduction value.
416
if (IsAPhi && Cur != Phi && !areAllUsesIn(Cur, VisitedInsts))
417
return false;
418
419
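// Count the compare/select instructions that belong to a recognized min/max
// or any-of idiom; the count is validated after the whole chain has been
// scanned.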
if ((isIntMinMaxRecurrenceKind(Kind) || Kind == RecurKind::IAnyOf) &&
420
(isa<ICmpInst>(Cur) || isa<SelectInst>(Cur)))
421
++NumCmpSelectPatternInst;
422
if ((isFPMinMaxRecurrenceKind(Kind) || Kind == RecurKind::FAnyOf) &&
423
(isa<FCmpInst>(Cur) || isa<SelectInst>(Cur)))
424
++NumCmpSelectPatternInst;
425
426
// Check whether we found a reduction operator.
427
FoundReduxOp |= !IsAPhi && Cur != Start;
428
429
// Process users of current instruction. Push non-PHI nodes after PHI nodes
430
// onto the stack. This way we are going to have seen all inputs to PHI
431
// nodes once we get to them.
432
SmallVector<Instruction *, 8> NonPHIs;
433
SmallVector<Instruction *, 8> PHIs;
434
for (User *U : Cur->users()) {
435
Instruction *UI = cast<Instruction>(U);
436
437
// If the user is a call to llvm.fmuladd then the instruction can only be
438
// the final operand.
439
if (isFMulAddIntrinsic(UI))
440
if (Cur == UI->getOperand(0) || Cur == UI->getOperand(1))
441
return false;
442
443
// Check if we found the exit user.
444
BasicBlock *Parent = UI->getParent();
445
if (!TheLoop->contains(Parent)) {
446
// If we already know this instruction is used externally, move on to
447
// the next user.
448
if (ExitInstruction == Cur)
449
continue;
450
451
// Exit if you find multiple values used outside or if the header phi
452
// node is being used. In this case the user uses the value of the
453
// previous iteration, in which case we would lose "VF-1" iterations of
454
// the reduction operation if we vectorize.
455
if (ExitInstruction != nullptr || Cur == Phi)
456
return false;
457
458
// The instruction used by an outside user must be the last instruction
459
// before we feed back to the reduction phi. Otherwise, we lose VF-1
460
// operations on the value.
461
if (!is_contained(Phi->operands(), Cur))
462
return false;
463
464
ExitInstruction = Cur;
465
continue;
466
}
467
468
// Process instructions only once (termination). Each reduction cycle
469
// value must only be used once, except by phi nodes and min/max
470
// reductions which are represented as a cmp followed by a select.
471
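// Scratch descriptor for the pattern matchers below; only the isRecurrence()
// result of each match is inspected here.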
InstDesc IgnoredVal(false, nullptr);
472
if (VisitedInsts.insert(UI).second) {
473
if (isa<PHINode>(UI)) {
474
PHIs.push_back(UI);
475
} else {
476
StoreInst *SI = dyn_cast<StoreInst>(UI);
477
if (SI && SI->getPointerOperand() == Cur) {
478
// The reduction variable chain may be stored somewhere, but it
479
// can't be used as an address.
480
return false;
481
}
482
NonPHIs.push_back(UI);
483
}
484
} else if (!isa<PHINode>(UI) &&
485
((!isa<FCmpInst>(UI) && !isa<ICmpInst>(UI) &&
486
!isa<SelectInst>(UI)) ||
487
(!isConditionalRdxPattern(Kind, UI).isRecurrence() &&
488
!isAnyOfPattern(TheLoop, Phi, UI, IgnoredVal)
489
.isRecurrence() &&
490
!isMinMaxPattern(UI, Kind, IgnoredVal).isRecurrence())))
491
return false;
492
493
// Remember that we completed the cycle.
494
if (UI == Phi)
495
FoundStartPHI = true;
496
}
497
Worklist.append(PHIs.begin(), PHIs.end());
498
Worklist.append(NonPHIs.begin(), NonPHIs.end());
499
}
500
501
// This means we have seen one but not the other instruction of the
502
// pattern or more than just a select and cmp. Zero implies that we saw an
503
// llvm.min/max intrinsic, which is always OK.
504
if (isMinMaxRecurrenceKind(Kind) && NumCmpSelectPatternInst != 2 &&
505
NumCmpSelectPatternInst != 0)
506
return false;
507
508
if (isAnyOfRecurrenceKind(Kind) && NumCmpSelectPatternInst != 1)
509
return false;
510
511
if (IntermediateStore) {
512
// Check that the stored value goes to the phi node again. This way we make sure
513
// that the value stored in IntermediateStore is indeed the final reduction
514
// value.
515
if (!is_contained(Phi->operands(), IntermediateStore->getValueOperand())) {
516
LLVM_DEBUG(dbgs() << "Not a final reduction value stored: "
517
<< *IntermediateStore << '\n');
518
return false;
519
}
520
521
// If there is an exit instruction, its value should be stored in
522
// IntermediateStore
523
if (ExitInstruction &&
524
IntermediateStore->getValueOperand() != ExitInstruction) {
525
LLVM_DEBUG(dbgs() << "Last store Instruction of reduction value does not "
526
"store last calculated value of the reduction: "
527
<< *IntermediateStore << '\n');
528
return false;
529
}
530
531
// If all uses are inside the loop (intermediate stores), then the
532
// reduction value after the loop will be the one used in the last store.
533
if (!ExitInstruction)
534
ExitInstruction = cast<Instruction>(IntermediateStore->getValueOperand());
535
}
536
537
if (!FoundStartPHI || !FoundReduxOp || !ExitInstruction)
538
return false;
539
540
const bool IsOrdered =
541
checkOrderedReduction(Kind, ExactFPMathInst, ExitInstruction, Phi);
542
543
if (Start != Phi) {
544
// If the starting value is not the same as the phi node, we speculatively
545
// looked through an 'and' instruction when evaluating a potential
546
// arithmetic reduction to determine if it may have been type-promoted.
547
//
548
// We now compute the minimal bit width that is required to represent the
549
// reduction. If this is the same width that was indicated by the 'and', we
550
// can represent the reduction in the smaller type. The 'and' instruction
551
// will be eliminated since it will essentially be a cast instruction that
552
// can be ignored in the cost model. If we compute a different type than we
553
// did when evaluating the 'and', the 'and' will not be eliminated, and we
554
// will end up with different kinds of operations in the recurrence
555
// expression (e.g., IntegerAND, IntegerADD). We give up if this is
556
// the case.
557
//
558
// The vectorizer relies on InstCombine to perform the actual
559
// type-shrinking. It does this by inserting instructions to truncate the
560
// exit value of the reduction to the width indicated by RecurrenceType and
561
// then extend this value back to the original width. If IsSigned is false,
562
// a 'zext' instruction will be generated; otherwise, a 'sext' will be
563
// used.
564
//
565
// TODO: We should not rely on InstCombine to rewrite the reduction in the
566
// smaller type. We should just generate a correctly typed expression
567
// to begin with.
568
Type *ComputedType;
569
std::tie(ComputedType, IsSigned) =
570
computeRecurrenceType(ExitInstruction, DB, AC, DT);
571
if (ComputedType != RecurrenceType)
572
return false;
573
}
574
575
// Collect cast instructions and the minimum width used by the recurrence.
576
// If the starting value is not the same as the phi node and the computed
577
// recurrence type is equal to the recurrence type, the recurrence expression
578
// will be represented in a narrower or wider type. If there are any cast
579
// instructions that will be unnecessary, collect them in CastsFromRecurTy.
580
// Note that the 'and' instruction was already included in this list.
581
//
582
// TODO: A better way to represent this may be to tag in some way all the
583
// instructions that are a part of the reduction. The vectorizer cost
584
// model could then apply the recurrence type to these instructions,
585
// without needing a white list of instructions to ignore.
586
// This may also be useful for in-loop reductions, if it can be
587
// kept simple enough.
588
collectCastInstrs(TheLoop, ExitInstruction, RecurrenceType, CastInsts,
589
MinWidthCastToRecurrenceType);
590
591
// We found a reduction var if we have reached the original phi node and we
592
// only have a single instruction with out-of-loop users.
593
594
// The ExitInstruction (the instruction allowed to have out-of-loop users)
595
// is saved as part of the RecurrenceDescriptor.
596
597
// Save the description of this reduction variable.
598
RecurrenceDescriptor RD(RdxStart, ExitInstruction, IntermediateStore, Kind,
599
FMF, ExactFPMathInst, RecurrenceType, IsSigned,
600
IsOrdered, CastInsts, MinWidthCastToRecurrenceType);
601
RedDes = RD;
602
603
return true;
604
}
605
606
// We are looking for loops that do something like this:
607
// int r = 0;
608
// for (int i = 0; i < n; i++) {
609
// if (src[i] > 3)
610
// r = 3;
611
// }
612
// where the reduction value (r) only has two states, in this example 0 or 3.
613
// The generated LLVM IR for this type of loop will be like this:
614
// for.body:
615
// %r = phi i32 [ %spec.select, %for.body ], [ 0, %entry ]
616
// ...
617
// %cmp = icmp sgt i32 %5, 3
618
// %spec.select = select i1 %cmp, i32 3, i32 %r
619
// ...
620
// In general we can support vectorization of loops where 'r' flips between
621
// any two non-constants, provided they are loop invariant. The only thing
622
// we actually care about at the end of the loop is whether or not any lane
623
// in the selected vector is different from the start value. The final
624
// across-vector reduction after the loop simply involves choosing the start
625
// value if nothing changed (0 in the example above) or the other selected
626
// value (3 in the example above).
627
RecurrenceDescriptor::InstDesc
628
RecurrenceDescriptor::isAnyOfPattern(Loop *Loop, PHINode *OrigPhi,
629
Instruction *I, InstDesc &Prev) {
630
// We must handle the select(cmp(),x,y) as a single instruction. Advance to
631
// the select.
632
CmpInst::Predicate Pred;
633
if (match(I, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
634
if (auto *Select = dyn_cast<SelectInst>(*I->user_begin()))
635
return InstDesc(Select, Prev.getRecKind());
636
}
637
638
if (!match(I,
639
m_Select(m_Cmp(Pred, m_Value(), m_Value()), m_Value(), m_Value())))
640
return InstDesc(false, I);
641
642
SelectInst *SI = cast<SelectInst>(I);
643
Value *NonPhi = nullptr;
644
645
if (OrigPhi == dyn_cast<PHINode>(SI->getTrueValue()))
646
NonPhi = SI->getFalseValue();
647
else if (OrigPhi == dyn_cast<PHINode>(SI->getFalseValue()))
648
NonPhi = SI->getTrueValue();
649
else
650
return InstDesc(false, I);
651
652
// We are looking for selects of the form:
653
// select(cmp(), phi, loop_invariant) or
654
// select(cmp(), loop_invariant, phi)
655
if (!Loop->isLoopInvariant(NonPhi))
656
return InstDesc(false, I);
657
658
return InstDesc(I, isa<ICmpInst>(I->getOperand(0)) ? RecurKind::IAnyOf
659
: RecurKind::FAnyOf);
660
}
661
662
RecurrenceDescriptor::InstDesc
663
RecurrenceDescriptor::isMinMaxPattern(Instruction *I, RecurKind Kind,
664
const InstDesc &Prev) {
665
assert((isa<CmpInst>(I) || isa<SelectInst>(I) || isa<CallInst>(I)) &&
666
"Expected a cmp or select or call instruction");
667
if (!isMinMaxRecurrenceKind(Kind))
668
return InstDesc(false, I);
669
670
// We must handle the select(cmp()) as a single instruction. Advance to the
671
// select.
672
CmpInst::Predicate Pred;
673
if (match(I, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
674
if (auto *Select = dyn_cast<SelectInst>(*I->user_begin()))
675
return InstDesc(Select, Prev.getRecKind());
676
}
677
678
// Only match a select with a single-use cmp condition, or a min/max intrinsic.
679
if (!isa<IntrinsicInst>(I) &&
680
!match(I, m_Select(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), m_Value(),
681
m_Value())))
682
return InstDesc(false, I);
683
684
// Look for a min/max pattern.
685
if (match(I, m_UMin(m_Value(), m_Value())))
686
return InstDesc(Kind == RecurKind::UMin, I);
687
if (match(I, m_UMax(m_Value(), m_Value())))
688
return InstDesc(Kind == RecurKind::UMax, I);
689
if (match(I, m_SMax(m_Value(), m_Value())))
690
return InstDesc(Kind == RecurKind::SMax, I);
691
if (match(I, m_SMin(m_Value(), m_Value())))
692
return InstDesc(Kind == RecurKind::SMin, I);
693
if (match(I, m_OrdFMin(m_Value(), m_Value())))
694
return InstDesc(Kind == RecurKind::FMin, I);
695
if (match(I, m_OrdFMax(m_Value(), m_Value())))
696
return InstDesc(Kind == RecurKind::FMax, I);
697
if (match(I, m_UnordFMin(m_Value(), m_Value())))
698
return InstDesc(Kind == RecurKind::FMin, I);
699
if (match(I, m_UnordFMax(m_Value(), m_Value())))
700
return InstDesc(Kind == RecurKind::FMax, I);
701
if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
702
return InstDesc(Kind == RecurKind::FMin, I);
703
if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value())))
704
return InstDesc(Kind == RecurKind::FMax, I);
705
if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(), m_Value())))
706
return InstDesc(Kind == RecurKind::FMinimum, I);
707
if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(), m_Value())))
708
return InstDesc(Kind == RecurKind::FMaximum, I);
709
710
return InstDesc(false, I);
711
}
712
713
/// Returns true if the select instruction has users in the compare-and-add
714
/// reduction pattern below. The select instruction argument is the last one
715
/// in the sequence.
716
///
717
/// %sum.1 = phi ...
718
/// ...
719
/// %cmp = fcmp pred %0, %CFP
720
/// %add = fadd %0, %sum.1
721
/// %sum.2 = select %cmp, %add, %sum.1
722
RecurrenceDescriptor::InstDesc
723
RecurrenceDescriptor::isConditionalRdxPattern(RecurKind Kind, Instruction *I) {
724
SelectInst *SI = dyn_cast<SelectInst>(I);
725
if (!SI)
726
return InstDesc(false, I);
727
728
CmpInst *CI = dyn_cast<CmpInst>(SI->getCondition());
729
// Only handle single use cases for now.
730
if (!CI || !CI->hasOneUse())
731
return InstDesc(false, I);
732
733
Value *TrueVal = SI->getTrueValue();
734
Value *FalseVal = SI->getFalseValue();
735
// For now, handle only the case where exactly one of the select's operands
736
// is a PHI node.
737
if ((isa<PHINode>(*TrueVal) && isa<PHINode>(*FalseVal)) ||
738
(!isa<PHINode>(*TrueVal) && !isa<PHINode>(*FalseVal)))
739
return InstDesc(false, I);
740
741
Instruction *I1 =
742
isa<PHINode>(*TrueVal) ? dyn_cast<Instruction>(FalseVal)
743
: dyn_cast<Instruction>(TrueVal);
744
if (!I1 || !I1->isBinaryOp())
745
return InstDesc(false, I);
746
747
Value *Op1, *Op2;
748
if (!(((m_FAdd(m_Value(Op1), m_Value(Op2)).match(I1) ||
749
m_FSub(m_Value(Op1), m_Value(Op2)).match(I1)) &&
750
I1->isFast()) ||
751
(m_FMul(m_Value(Op1), m_Value(Op2)).match(I1) && (I1->isFast())) ||
752
((m_Add(m_Value(Op1), m_Value(Op2)).match(I1) ||
753
m_Sub(m_Value(Op1), m_Value(Op2)).match(I1))) ||
754
(m_Mul(m_Value(Op1), m_Value(Op2)).match(I1))))
755
return InstDesc(false, I);
756
757
Instruction *IPhi = isa<PHINode>(*Op1) ? dyn_cast<Instruction>(Op1)
758
: dyn_cast<Instruction>(Op2);
759
if (!IPhi || IPhi != FalseVal)
760
return InstDesc(false, I);
761
762
return InstDesc(true, SI);
763
}
764
765
RecurrenceDescriptor::InstDesc
766
RecurrenceDescriptor::isRecurrenceInstr(Loop *L, PHINode *OrigPhi,
767
Instruction *I, RecurKind Kind,
768
InstDesc &Prev, FastMathFlags FuncFMF) {
769
assert(Prev.getRecKind() == RecurKind::None || Prev.getRecKind() == Kind);
770
switch (I->getOpcode()) {
771
default:
772
return InstDesc(false, I);
773
case Instruction::PHI:
774
return InstDesc(I, Prev.getRecKind(), Prev.getExactFPMathInst());
775
case Instruction::Sub:
776
case Instruction::Add:
777
return InstDesc(Kind == RecurKind::Add, I);
778
case Instruction::Mul:
779
return InstDesc(Kind == RecurKind::Mul, I);
780
case Instruction::And:
781
return InstDesc(Kind == RecurKind::And, I);
782
case Instruction::Or:
783
return InstDesc(Kind == RecurKind::Or, I);
784
case Instruction::Xor:
785
return InstDesc(Kind == RecurKind::Xor, I);
786
case Instruction::FDiv:
787
case Instruction::FMul:
788
return InstDesc(Kind == RecurKind::FMul, I,
789
I->hasAllowReassoc() ? nullptr : I);
790
case Instruction::FSub:
791
case Instruction::FAdd:
792
return InstDesc(Kind == RecurKind::FAdd, I,
793
I->hasAllowReassoc() ? nullptr : I);
794
case Instruction::Select:
795
if (Kind == RecurKind::FAdd || Kind == RecurKind::FMul ||
796
Kind == RecurKind::Add || Kind == RecurKind::Mul)
797
return isConditionalRdxPattern(Kind, I);
798
[[fallthrough]];
799
case Instruction::FCmp:
800
case Instruction::ICmp:
801
case Instruction::Call:
802
if (isAnyOfRecurrenceKind(Kind))
803
return isAnyOfPattern(L, OrigPhi, I, Prev);
804
auto HasRequiredFMF = [&]() {
805
if (FuncFMF.noNaNs() && FuncFMF.noSignedZeros())
806
return true;
807
if (isa<FPMathOperator>(I) && I->hasNoNaNs() && I->hasNoSignedZeros())
808
return true;
809
// minimum and maximum intrinsics do not require nsz and nnan flags since
810
// NaN and signed zeroes are propagated in the intrinsic implementation.
811
return match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(), m_Value())) ||
812
match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(), m_Value()));
813
};
814
if (isIntMinMaxRecurrenceKind(Kind) ||
815
(HasRequiredFMF() && isFPMinMaxRecurrenceKind(Kind)))
816
return isMinMaxPattern(I, Kind, Prev);
817
else if (isFMulAddIntrinsic(I))
818
return InstDesc(Kind == RecurKind::FMulAdd, I,
819
I->hasAllowReassoc() ? nullptr : I);
820
return InstDesc(false, I);
821
}
822
}
823
824
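// Returns true if more than MaxNumUses of I's operands are instructions that
// are contained in Insts.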
bool RecurrenceDescriptor::hasMultipleUsesOf(
825
Instruction *I, SmallPtrSetImpl<Instruction *> &Insts,
826
unsigned MaxNumUses) {
827
unsigned NumUses = 0;
828
for (const Use &U : I->operands()) {
829
if (Insts.count(dyn_cast<Instruction>(U)))
830
++NumUses;
831
if (NumUses > MaxNumUses)
832
return true;
833
}
834
835
return false;
836
}
837
838
bool RecurrenceDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
839
RecurrenceDescriptor &RedDes,
840
DemandedBits *DB, AssumptionCache *AC,
841
DominatorTree *DT,
842
ScalarEvolution *SE) {
843
BasicBlock *Header = TheLoop->getHeader();
844
Function &F = *Header->getParent();
845
FastMathFlags FMF;
846
FMF.setNoNaNs(
847
F.getFnAttribute("no-nans-fp-math").getValueAsBool());
848
FMF.setNoSignedZeros(
849
F.getFnAttribute("no-signed-zeros-fp-math").getValueAsBool());
850
851
if (AddReductionVar(Phi, RecurKind::Add, TheLoop, FMF, RedDes, DB, AC, DT,
852
SE)) {
853
LLVM_DEBUG(dbgs() << "Found an ADD reduction PHI." << *Phi << "\n");
854
return true;
855
}
856
if (AddReductionVar(Phi, RecurKind::Mul, TheLoop, FMF, RedDes, DB, AC, DT,
857
SE)) {
858
LLVM_DEBUG(dbgs() << "Found a MUL reduction PHI." << *Phi << "\n");
859
return true;
860
}
861
if (AddReductionVar(Phi, RecurKind::Or, TheLoop, FMF, RedDes, DB, AC, DT,
862
SE)) {
863
LLVM_DEBUG(dbgs() << "Found an OR reduction PHI." << *Phi << "\n");
864
return true;
865
}
866
if (AddReductionVar(Phi, RecurKind::And, TheLoop, FMF, RedDes, DB, AC, DT,
867
SE)) {
868
LLVM_DEBUG(dbgs() << "Found an AND reduction PHI." << *Phi << "\n");
869
return true;
870
}
871
if (AddReductionVar(Phi, RecurKind::Xor, TheLoop, FMF, RedDes, DB, AC, DT,
872
SE)) {
873
LLVM_DEBUG(dbgs() << "Found a XOR reduction PHI." << *Phi << "\n");
874
return true;
875
}
876
if (AddReductionVar(Phi, RecurKind::SMax, TheLoop, FMF, RedDes, DB, AC, DT,
877
SE)) {
878
LLVM_DEBUG(dbgs() << "Found a SMAX reduction PHI." << *Phi << "\n");
879
return true;
880
}
881
if (AddReductionVar(Phi, RecurKind::SMin, TheLoop, FMF, RedDes, DB, AC, DT,
882
SE)) {
883
LLVM_DEBUG(dbgs() << "Found a SMIN reduction PHI." << *Phi << "\n");
884
return true;
885
}
886
if (AddReductionVar(Phi, RecurKind::UMax, TheLoop, FMF, RedDes, DB, AC, DT,
887
SE)) {
888
LLVM_DEBUG(dbgs() << "Found a UMAX reduction PHI." << *Phi << "\n");
889
return true;
890
}
891
if (AddReductionVar(Phi, RecurKind::UMin, TheLoop, FMF, RedDes, DB, AC, DT,
892
SE)) {
893
LLVM_DEBUG(dbgs() << "Found a UMIN reduction PHI." << *Phi << "\n");
894
return true;
895
}
896
if (AddReductionVar(Phi, RecurKind::IAnyOf, TheLoop, FMF, RedDes, DB, AC, DT,
897
SE)) {
898
LLVM_DEBUG(dbgs() << "Found an integer conditional select reduction PHI."
899
<< *Phi << "\n");
900
return true;
901
}
902
if (AddReductionVar(Phi, RecurKind::FMul, TheLoop, FMF, RedDes, DB, AC, DT,
903
SE)) {
904
LLVM_DEBUG(dbgs() << "Found an FMult reduction PHI." << *Phi << "\n");
905
return true;
906
}
907
if (AddReductionVar(Phi, RecurKind::FAdd, TheLoop, FMF, RedDes, DB, AC, DT,
908
SE)) {
909
LLVM_DEBUG(dbgs() << "Found an FAdd reduction PHI." << *Phi << "\n");
910
return true;
911
}
912
if (AddReductionVar(Phi, RecurKind::FMax, TheLoop, FMF, RedDes, DB, AC, DT,
913
SE)) {
914
LLVM_DEBUG(dbgs() << "Found a float MAX reduction PHI." << *Phi << "\n");
915
return true;
916
}
917
if (AddReductionVar(Phi, RecurKind::FMin, TheLoop, FMF, RedDes, DB, AC, DT,
918
SE)) {
919
LLVM_DEBUG(dbgs() << "Found a float MIN reduction PHI." << *Phi << "\n");
920
return true;
921
}
922
if (AddReductionVar(Phi, RecurKind::FAnyOf, TheLoop, FMF, RedDes, DB, AC, DT,
923
SE)) {
924
LLVM_DEBUG(dbgs() << "Found a float conditional select reduction PHI."
925
<< " PHI." << *Phi << "\n");
926
return true;
927
}
928
if (AddReductionVar(Phi, RecurKind::FMulAdd, TheLoop, FMF, RedDes, DB, AC, DT,
929
SE)) {
930
LLVM_DEBUG(dbgs() << "Found an FMulAdd reduction PHI." << *Phi << "\n");
931
return true;
932
}
933
if (AddReductionVar(Phi, RecurKind::FMaximum, TheLoop, FMF, RedDes, DB, AC, DT,
934
SE)) {
935
LLVM_DEBUG(dbgs() << "Found a float MAXIMUM reduction PHI." << *Phi << "\n");
936
return true;
937
}
938
if (AddReductionVar(Phi, RecurKind::FMinimum, TheLoop, FMF, RedDes, DB, AC, DT,
939
SE)) {
940
LLVM_DEBUG(dbgs() << "Found a float MINIMUM reduction PHI." << *Phi << "\n");
941
return true;
942
}
943
// Not a reduction of known type.
944
return false;
945
}
946
947
bool RecurrenceDescriptor::isFixedOrderRecurrence(PHINode *Phi, Loop *TheLoop,
948
DominatorTree *DT) {
949
950
// Ensure the phi node is in the loop header and has two incoming values.
951
if (Phi->getParent() != TheLoop->getHeader() ||
952
Phi->getNumIncomingValues() != 2)
953
return false;
954
955
// Ensure the loop has a preheader and a single latch block. The loop
956
// vectorizer will need the latch to set up the next iteration of the loop.
957
auto *Preheader = TheLoop->getLoopPreheader();
958
auto *Latch = TheLoop->getLoopLatch();
959
if (!Preheader || !Latch)
960
return false;
961
962
// Ensure the phi node's incoming blocks are the loop preheader and latch.
963
if (Phi->getBasicBlockIndex(Preheader) < 0 ||
964
Phi->getBasicBlockIndex(Latch) < 0)
965
return false;
966
967
// Get the previous value. The previous value comes from the latch edge while
968
// the initial value comes from the preheader edge.
969
auto *Previous = dyn_cast<Instruction>(Phi->getIncomingValueForBlock(Latch));
970
971
// If Previous is a phi in the header, go through incoming values from the
972
// latch until we find a non-phi value. Use this as the new Previous; all uses
973
// in the header will be dominated by the original phi, but need to be moved
974
// after the non-phi previous value.
975
SmallPtrSet<PHINode *, 4> SeenPhis;
976
while (auto *PrevPhi = dyn_cast_or_null<PHINode>(Previous)) {
977
if (PrevPhi->getParent() != Phi->getParent())
978
return false;
979
if (!SeenPhis.insert(PrevPhi).second)
980
return false;
981
Previous = dyn_cast<Instruction>(PrevPhi->getIncomingValueForBlock(Latch));
982
}
983
984
if (!Previous || !TheLoop->contains(Previous) || isa<PHINode>(Previous))
985
return false;
986
987
// Ensure every user of the phi node (recursively) is dominated by the
988
// previous value. The dominance requirement ensures the loop vectorizer will
989
// not need to vectorize the initial value prior to the first iteration of the
990
// loop.
991
// TODO: Consider extending this sinking to handle memory instructions.
992
993
SmallPtrSet<Value *, 8> Seen;
994
BasicBlock *PhiBB = Phi->getParent();
995
SmallVector<Instruction *, 8> WorkList;
996
auto TryToPushSinkCandidate = [&](Instruction *SinkCandidate) {
997
// Cyclic dependence.
998
if (Previous == SinkCandidate)
999
return false;
1000
1001
if (!Seen.insert(SinkCandidate).second)
1002
return true;
1003
if (DT->dominates(Previous,
1004
SinkCandidate)) // We already are good w/o sinking.
1005
return true;
1006
1007
if (SinkCandidate->getParent() != PhiBB ||
1008
SinkCandidate->mayHaveSideEffects() ||
1009
SinkCandidate->mayReadFromMemory() || SinkCandidate->isTerminator())
1010
return false;
1011
1012
// If we reach a PHI node that is not dominated by Previous, we reached a
1013
// header PHI. No need for sinking.
1014
if (isa<PHINode>(SinkCandidate))
1015
return true;
1016
1017
// Sink User tentatively and check its users
1018
WorkList.push_back(SinkCandidate);
1019
return true;
1020
};
1021
1022
WorkList.push_back(Phi);
1023
// Try to recursively sink instructions and their users after Previous.
1024
while (!WorkList.empty()) {
1025
Instruction *Current = WorkList.pop_back_val();
1026
for (User *User : Current->users()) {
1027
if (!TryToPushSinkCandidate(cast<Instruction>(User)))
1028
return false;
1029
}
1030
}
1031
1032
return true;
1033
}
1034
1035
/// This function returns the identity element (or neutral element) for
1036
/// the operation K.
1037
Value *RecurrenceDescriptor::getRecurrenceIdentity(RecurKind K, Type *Tp,
1038
FastMathFlags FMF) const {
1039
switch (K) {
1040
case RecurKind::Xor:
1041
case RecurKind::Add:
1042
case RecurKind::Or:
1043
// Adding, Xoring, Oring zero to a number does not change it.
1044
return ConstantInt::get(Tp, 0);
1045
case RecurKind::Mul:
1046
// Multiplying a number by 1 does not change it.
1047
return ConstantInt::get(Tp, 1);
1048
case RecurKind::And:
1049
// AND-ing a number with an all-1 value does not change it.
1050
return ConstantInt::get(Tp, -1, true);
1051
case RecurKind::FMul:
1052
// Multiplying a number by 1 does not change it.
1053
return ConstantFP::get(Tp, 1.0L);
1054
case RecurKind::FMulAdd:
1055
case RecurKind::FAdd:
1056
// Adding zero to a number does not change it.
1057
// FIXME: Ideally we should not need to check FMF for FAdd and should always
1058
// use -0.0. However, this will currently result in mixed vectors of 0.0/-0.0.
1059
// Instead, we should ensure that 1) the FMF from FAdd are propagated to the PHI
1060
// nodes where possible, and 2) PHIs with the nsz flag + -0.0 use 0.0. This would
1061
// mean we can then remove the check for noSignedZeros() below (see D98963).
1062
if (FMF.noSignedZeros())
1063
return ConstantFP::get(Tp, 0.0L);
1064
return ConstantFP::get(Tp, -0.0L);
1065
case RecurKind::UMin:
1066
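// The identity of unsigned min is the largest unsigned value (all bits set).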
return ConstantInt::get(Tp, -1, true);
1067
case RecurKind::UMax:
1068
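// The identity of unsigned max is zero.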
return ConstantInt::get(Tp, 0);
1069
case RecurKind::SMin:
1070
return ConstantInt::get(Tp,
1071
APInt::getSignedMaxValue(Tp->getIntegerBitWidth()));
1072
case RecurKind::SMax:
1073
return ConstantInt::get(Tp,
1074
APInt::getSignedMinValue(Tp->getIntegerBitWidth()));
1075
case RecurKind::FMin:
1076
assert((FMF.noNaNs() && FMF.noSignedZeros()) &&
1077
"nnan, nsz is expected to be set for FP min reduction.");
1078
return ConstantFP::getInfinity(Tp, false /*Negative*/);
1079
case RecurKind::FMax:
1080
assert((FMF.noNaNs() && FMF.noSignedZeros()) &&
1081
"nnan, nsz is expected to be set for FP max reduction.");
1082
return ConstantFP::getInfinity(Tp, true /*Negative*/);
1083
case RecurKind::FMinimum:
1084
return ConstantFP::getInfinity(Tp, false /*Negative*/);
1085
case RecurKind::FMaximum:
1086
return ConstantFP::getInfinity(Tp, true /*Negative*/);
1087
case RecurKind::IAnyOf:
1088
case RecurKind::FAnyOf:
1089
return getRecurrenceStartValue();
1090
break;
1091
default:
1092
llvm_unreachable("Unknown recurrence kind");
1093
}
1094
}
1095
1096
unsigned RecurrenceDescriptor::getOpcode(RecurKind Kind) {
1097
switch (Kind) {
1098
case RecurKind::Add:
1099
return Instruction::Add;
1100
case RecurKind::Mul:
1101
return Instruction::Mul;
1102
case RecurKind::Or:
1103
return Instruction::Or;
1104
case RecurKind::And:
1105
return Instruction::And;
1106
case RecurKind::Xor:
1107
return Instruction::Xor;
1108
case RecurKind::FMul:
1109
return Instruction::FMul;
1110
case RecurKind::FMulAdd:
1111
case RecurKind::FAdd:
1112
return Instruction::FAdd;
1113
case RecurKind::SMax:
1114
case RecurKind::SMin:
1115
case RecurKind::UMax:
1116
case RecurKind::UMin:
1117
case RecurKind::IAnyOf:
1118
return Instruction::ICmp;
1119
case RecurKind::FMax:
1120
case RecurKind::FMin:
1121
case RecurKind::FMaximum:
1122
case RecurKind::FMinimum:
1123
case RecurKind::FAnyOf:
1124
return Instruction::FCmp;
1125
default:
1126
llvm_unreachable("Unknown recurrence operation");
1127
}
1128
}
1129
1130
SmallVector<Instruction *, 4>
1131
RecurrenceDescriptor::getReductionOpChain(PHINode *Phi, Loop *L) const {
1132
SmallVector<Instruction *, 4> ReductionOperations;
1133
unsigned RedOp = getOpcode(Kind);
1134
1135
// Search down from the Phi to the LoopExitInstr, looking for instructions
1136
// with a single user of the correct type for the reduction.
1137
1138
// Note that we check that the type of the operand is correct for each item in
1139
// the chain, including the last (the loop exit value). This can come up from
1140
// sub, which would otherwise be treated as an add reduction. MinMax reductions also need
1141
// to check for a pair of icmp/select, for which we use getNextInstruction and
1142
// isCorrectOpcode functions to step the right number of instructions, and
1143
// check the icmp/select pair.
1144
// FIXME: We also do not attempt to look through Select's yet, which might
1145
// be part of the reduction chain, or attempt to look through And's to find a
1146
// smaller bitwidth. Subs are also currently not allowed (which are usually
1147
// treated as part of an add reduction) as they are expected to generally be
1148
// more expensive than out-of-loop reductions, and need to be costed more
1149
// carefully.
1150
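// Min/max reductions are formed by an icmp/fcmp plus a select, so each value
// in the chain feeds both instructions of the next link and is expected to
// have two uses.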
unsigned ExpectedUses = 1;
1151
if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp)
1152
ExpectedUses = 2;
1153
1154
auto getNextInstruction = [&](Instruction *Cur) -> Instruction * {
1155
for (auto *User : Cur->users()) {
1156
Instruction *UI = cast<Instruction>(User);
1157
if (isa<PHINode>(UI))
1158
continue;
1159
if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp) {
1160
// We are expecting an icmp/select pair, so we move to the next select
1161
// instruction if we can. We already know that Cur has 2 uses.
1162
if (isa<SelectInst>(UI))
1163
return UI;
1164
continue;
1165
}
1166
return UI;
1167
}
1168
return nullptr;
1169
};
1170
auto isCorrectOpcode = [&](Instruction *Cur) {
1171
if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp) {
1172
Value *LHS, *RHS;
1173
return SelectPatternResult::isMinOrMax(
1174
matchSelectPattern(Cur, LHS, RHS).Flavor);
1175
}
1176
// Recognize a call to the llvm.fmuladd intrinsic.
1177
if (isFMulAddIntrinsic(Cur))
1178
return true;
1179
1180
return Cur->getOpcode() == RedOp;
1181
};
1182
1183
// Attempt to look through Phis which are part of the reduction chain
1184
unsigned ExtraPhiUses = 0;
1185
Instruction *RdxInstr = LoopExitInstr;
1186
if (auto ExitPhi = dyn_cast<PHINode>(LoopExitInstr)) {
1187
if (ExitPhi->getNumIncomingValues() != 2)
1188
return {};
1189
1190
Instruction *Inc0 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(0));
1191
Instruction *Inc1 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(1));
1192
1193
Instruction *Chain = nullptr;
1194
if (Inc0 == Phi)
1195
Chain = Inc1;
1196
else if (Inc1 == Phi)
1197
Chain = Inc0;
1198
else
1199
return {};
1200
1201
RdxInstr = Chain;
1202
ExtraPhiUses = 1;
1203
}
1204
1205
// The loop exit instruction we check first (as a quick test) but add last. We
1206
// check the opcode is correct (and dont allow them to be Subs) and that they
1207
// have expected to have the expected number of uses. They will have one use
1208
// from the phi and one from a LCSSA value, no matter the type.
1209
if (!isCorrectOpcode(RdxInstr) || !LoopExitInstr->hasNUses(2))
1210
return {};
1211
1212
// Check that the Phi has one (or two for min/max) uses, plus an extra use
1213
// for conditional reductions.
1214
if (!Phi->hasNUses(ExpectedUses + ExtraPhiUses))
1215
return {};
1216
1217
Instruction *Cur = getNextInstruction(Phi);
1218
1219
// Each other instruction in the chain should have the expected number of uses
1220
// and be the correct opcode.
1221
while (Cur != RdxInstr) {
1222
if (!Cur || !isCorrectOpcode(Cur) || !Cur->hasNUses(ExpectedUses))
1223
return {};
1224
1225
ReductionOperations.push_back(Cur);
1226
Cur = getNextInstruction(Cur);
1227
}
1228
1229
ReductionOperations.push_back(Cur);
1230
return ReductionOperations;
1231
}
1232
1233
InductionDescriptor::InductionDescriptor(Value *Start, InductionKind K,
1234
const SCEV *Step, BinaryOperator *BOp,
1235
SmallVectorImpl<Instruction *> *Casts)
1236
: StartValue(Start), IK(K), Step(Step), InductionBinOp(BOp) {
1237
assert(IK != IK_NoInduction && "Not an induction");
1238
1239
// Start value type should match the induction kind and the value
1240
// itself should not be null.
1241
assert(StartValue && "StartValue is null");
1242
assert((IK != IK_PtrInduction || StartValue->getType()->isPointerTy()) &&
1243
"StartValue is not a pointer for pointer induction");
1244
assert((IK != IK_IntInduction || StartValue->getType()->isIntegerTy()) &&
1245
"StartValue is not an integer for integer induction");
1246
1247
// Check the Step Value. It should be a non-zero integer value.
1248
assert((!getConstIntStepValue() || !getConstIntStepValue()->isZero()) &&
1249
"Step value is zero");
1250
1251
assert((IK == IK_FpInduction || Step->getType()->isIntegerTy()) &&
1252
"StepValue is not an integer");
1253
1254
assert((IK != IK_FpInduction || Step->getType()->isFloatingPointTy()) &&
1255
"StepValue is not FP for FpInduction");
1256
assert((IK != IK_FpInduction ||
1257
(InductionBinOp &&
1258
(InductionBinOp->getOpcode() == Instruction::FAdd ||
1259
InductionBinOp->getOpcode() == Instruction::FSub))) &&
1260
"Binary opcode should be specified for FP induction");
1261
1262
if (Casts) {
1263
for (auto &Inst : *Casts) {
1264
RedundantCasts.push_back(Inst);
1265
}
1266
}
1267
}
1268
1269
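// A ConstantInt step can only be extracted when the SCEV step is a constant;
// otherwise null is returned.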
ConstantInt *InductionDescriptor::getConstIntStepValue() const {
1270
if (isa<SCEVConstant>(Step))
1271
return dyn_cast<ConstantInt>(cast<SCEVConstant>(Step)->getValue());
1272
return nullptr;
1273
}
1274
1275
bool InductionDescriptor::isFPInductionPHI(PHINode *Phi, const Loop *TheLoop,
1276
ScalarEvolution *SE,
1277
InductionDescriptor &D) {
1278
1279
// Here we only handle FP induction variables.
1280
assert(Phi->getType()->isFloatingPointTy() && "Unexpected Phi type");
1281
1282
if (TheLoop->getHeader() != Phi->getParent())
1283
return false;
1284
1285
// The loop may have multiple entrances or multiple exits; we can analyze
1286
// this phi if it has a unique entry value and a unique backedge value.
1287
if (Phi->getNumIncomingValues() != 2)
1288
return false;
1289
Value *BEValue = nullptr, *StartValue = nullptr;
1290
if (TheLoop->contains(Phi->getIncomingBlock(0))) {
1291
BEValue = Phi->getIncomingValue(0);
1292
StartValue = Phi->getIncomingValue(1);
1293
} else {
1294
assert(TheLoop->contains(Phi->getIncomingBlock(1)) &&
1295
"Unexpected Phi node in the loop");
1296
BEValue = Phi->getIncomingValue(1);
1297
StartValue = Phi->getIncomingValue(0);
1298
}
1299
1300
BinaryOperator *BOp = dyn_cast<BinaryOperator>(BEValue);
1301
if (!BOp)
1302
return false;
1303
1304
Value *Addend = nullptr;
1305
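// For an FAdd update the phi may appear as either operand; for an FSub the
// phi must be the first operand so that the loop-invariant step is the value
// being subtracted.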
if (BOp->getOpcode() == Instruction::FAdd) {
1306
if (BOp->getOperand(0) == Phi)
1307
Addend = BOp->getOperand(1);
1308
else if (BOp->getOperand(1) == Phi)
1309
Addend = BOp->getOperand(0);
1310
} else if (BOp->getOpcode() == Instruction::FSub)
1311
if (BOp->getOperand(0) == Phi)
1312
Addend = BOp->getOperand(1);
1313
1314
if (!Addend)
1315
return false;
1316
1317
// The addend should be loop invariant
1318
if (auto *I = dyn_cast<Instruction>(Addend))
1319
if (TheLoop->contains(I))
1320
return false;
1321
1322
// FP Step has unknown SCEV
1323
const SCEV *Step = SE->getUnknown(Addend);
1324
D = InductionDescriptor(StartValue, IK_FpInduction, Step, BOp);
1325
return true;
1326
}
1327
1328
/// This function is called when we suspect that the update-chain of a phi node
1329
/// (whose symbolic SCEV expression is in \p PhiScev) contains redundant casts,
1330
/// that can be ignored. (This can happen when the PSCEV rewriter adds a runtime
1331
/// predicate P under which the SCEV expression for the phi can be the
1332
/// AddRecurrence \p AR; See createAddRecFromPHIWithCast). We want to find the
1333
/// cast instructions that are involved in the update-chain of this induction.
1334
/// A caller that adds the required runtime predicate can be free to drop these
1335
/// cast instructions, and compute the phi using \p AR (instead of some scev
1336
/// expression with casts).
1337
///
1338
/// For example, without a predicate the scev expression can take the following
1339
/// form:
1340
/// (Ext ix (Trunc iy ( Start + i*Step ) to ix) to iy)
1341
///
1342
/// It corresponds to the following IR sequence:
1343
/// %for.body:
1344
/// %x = phi i64 [ 0, %ph ], [ %add, %for.body ]
1345
/// %casted_phi = "ExtTrunc i64 %x"
1346
/// %add = add i64 %casted_phi, %step
1347
///
1348
/// where %x is given in \p PN,
1349
/// PSE.getSCEV(%x) is equal to PSE.getSCEV(%casted_phi) under a predicate,
1350
/// and the IR sequence that "ExtTrunc i64 %x" represents can take one of
1351
/// several forms, for example, such as:
1352
/// ExtTrunc1: %casted_phi = and %x, 2^n-1
1353
/// or:
1354
/// ExtTrunc2: %t = shl %x, m
1355
/// %casted_phi = ashr %t, m
1356
///
1357
/// If we are able to find such sequence, we return the instructions
1358
/// we found, namely %casted_phi and the instructions on its use-def chain up
1359
/// to the phi (not including the phi).
1360
static bool getCastsForInductionPHI(PredicatedScalarEvolution &PSE,
1361
const SCEVUnknown *PhiScev,
1362
const SCEVAddRecExpr *AR,
1363
SmallVectorImpl<Instruction *> &CastInsts) {
1364
1365
assert(CastInsts.empty() && "CastInsts is expected to be empty.");
1366
auto *PN = cast<PHINode>(PhiScev->getValue());
1367
assert(PSE.getSCEV(PN) == AR && "Unexpected phi node SCEV expression");
1368
const Loop *L = AR->getLoop();
1369
1370
// Find any cast instructions that participate in the def-use chain of
1371
// PhiScev in the loop.
1372
// FORNOW/TODO: We currently expect the def-use chain to include only
1373
// two-operand instructions, where one of the operands is an invariant.
1374
// createAddRecFromPHIWithCasts() currently does not support anything more
1375
// involved than that, so we keep the search simple. This can be
1376
// extended/generalized as needed.
1377
1378
auto getDef = [&](const Value *Val) -> Value * {
1379
const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Val);
1380
if (!BinOp)
1381
return nullptr;
1382
Value *Op0 = BinOp->getOperand(0);
1383
Value *Op1 = BinOp->getOperand(1);
1384
Value *Def = nullptr;
1385
if (L->isLoopInvariant(Op0))
1386
Def = Op1;
1387
else if (L->isLoopInvariant(Op1))
1388
Def = Op0;
1389
return Def;
1390
};
1391
1392
// Look for the instruction that defines the induction via the
1393
// loop backedge.
1394
BasicBlock *Latch = L->getLoopLatch();
1395
if (!Latch)
1396
return false;
1397
Value *Val = PN->getIncomingValueForBlock(Latch);
1398
if (!Val)
1399
return false;
1400
1401
// Follow the def-use chain until the induction phi is reached.
1402
// If on the way we encounter a Value that has the same SCEV Expr as the
1403
// phi node, we can consider the instructions we visit from that point
1404
// as part of the cast-sequence that can be ignored.
1405
bool InCastSequence = false;
1406
auto *Inst = dyn_cast<Instruction>(Val);
1407
while (Val != PN) {
1408
// If we encountered a phi node other than PN, or if we left the loop,
1409
// we bail out.
1410
if (!Inst || !L->contains(Inst)) {
1411
return false;
1412
}
1413
auto *AddRec = dyn_cast<SCEVAddRecExpr>(PSE.getSCEV(Val));
1414
if (AddRec && PSE.areAddRecsEqualWithPreds(AddRec, AR))
1415
InCastSequence = true;
1416
if (InCastSequence) {
1417
// Only the last instruction in the cast sequence is expected to have
1418
// uses outside the induction def-use chain.
1419
if (!CastInsts.empty())
1420
if (!Inst->hasOneUse())
1421
return false;
1422
CastInsts.push_back(Inst);
1423
}
1424
Val = getDef(Val);
1425
if (!Val)
1426
return false;
1427
Inst = dyn_cast<Instruction>(Val);
1428
}
1429
1430
return InCastSequence;
1431
}
1432
1433
bool InductionDescriptor::isInductionPHI(PHINode *Phi, const Loop *TheLoop,
1434
PredicatedScalarEvolution &PSE,
1435
InductionDescriptor &D, bool Assume) {
1436
Type *PhiTy = Phi->getType();
1437
1438
// Handle integer and pointer induction variables.
1439
// We now also handle FP inductions, but without trying to make a
1440
// recurrent expression from the PHI node in-place.
1441
1442
if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy() && !PhiTy->isFloatTy() &&
1443
!PhiTy->isDoubleTy() && !PhiTy->isHalfTy())
1444
return false;
1445
1446
if (PhiTy->isFloatingPointTy())
1447
return isFPInductionPHI(Phi, TheLoop, PSE.getSE(), D);
1448
1449
const SCEV *PhiScev = PSE.getSCEV(Phi);
1450
const auto *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
1451
1452
// We need this expression to be an AddRecExpr.
1453
if (Assume && !AR)
1454
AR = PSE.getAsAddRec(Phi);
1455
1456
if (!AR) {
1457
LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
1458
return false;
1459
}
1460
1461
// Record any Cast instructions that participate in the induction update
1462
const auto *SymbolicPhi = dyn_cast<SCEVUnknown>(PhiScev);
1463
// If we started from an UnknownSCEV, and managed to build an addRecurrence
1464
// only after enabling Assume with PSCEV, this means we may have encountered
1465
// cast instructions that required adding a runtime check in order to
1466
// guarantee the correctness of the AddRecurrence representation of the
1467
// induction.
1468
if (PhiScev != AR && SymbolicPhi) {
1469
SmallVector<Instruction *, 2> Casts;
1470
if (getCastsForInductionPHI(PSE, SymbolicPhi, AR, Casts))
1471
return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR, &Casts);
1472
}
1473
1474
return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR);
1475
}
1476
1477
bool InductionDescriptor::isInductionPHI(
1478
PHINode *Phi, const Loop *TheLoop, ScalarEvolution *SE,
1479
InductionDescriptor &D, const SCEV *Expr,
1480
SmallVectorImpl<Instruction *> *CastsToIgnore) {
1481
Type *PhiTy = Phi->getType();
1482
// We only handle integer and pointer induction variables.
1483
if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy())
1484
return false;
1485
1486
// Check that the PHI is consecutive.
1487
const SCEV *PhiScev = Expr ? Expr : SE->getSCEV(Phi);
1488
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
1489
1490
if (!AR) {
1491
LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
1492
return false;
1493
}
1494
1495
if (AR->getLoop() != TheLoop) {
1496
// FIXME: We should treat this as a uniform. Unfortunately, we
1497
// don't currently know how to handle uniform PHIs.
1498
LLVM_DEBUG(
1499
dbgs() << "LV: PHI is a recurrence with respect to an outer loop.\n");
1500
return false;
1501
}
1502
1503
// This function assumes that InductionPhi is called only on Phi nodes
1504
// present inside loop headers. Check for the same, and throw an assert if
1505
// the current Phi is not present inside the loop header.
1506
assert(Phi->getParent() == AR->getLoop()->getHeader()
1507
&& "Invalid Phi node, not present in loop header");
1508
1509
Value *StartValue =
1510
Phi->getIncomingValueForBlock(AR->getLoop()->getLoopPreheader());
1511
1512
BasicBlock *Latch = AR->getLoop()->getLoopLatch();
1513
if (!Latch)
1514
return false;
1515
1516
const SCEV *Step = AR->getStepRecurrence(*SE);
1517
// Calculate the pointer stride and check if it is consecutive.
1518
// The stride may be a constant or a loop invariant integer value.
1519
const SCEVConstant *ConstStep = dyn_cast<SCEVConstant>(Step);
1520
if (!ConstStep && !SE->isLoopInvariant(Step, TheLoop))
1521
return false;
1522
1523
if (PhiTy->isIntegerTy()) {
1524
BinaryOperator *BOp =
1525
dyn_cast<BinaryOperator>(Phi->getIncomingValueForBlock(Latch));
1526
D = InductionDescriptor(StartValue, IK_IntInduction, Step, BOp,
1527
CastsToIgnore);
1528
return true;
1529
}
1530
1531
assert(PhiTy->isPointerTy() && "The PHI must be a pointer");
1532
1533
// This allows induction variables with non-constant steps.
1534
D = InductionDescriptor(StartValue, IK_PtrInduction, Step);
1535
return true;
1536
}
1537
1538