Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/compiler/nir/nir_loop_analyze.c
4547 views
1
/*
2
* Copyright © 2015 Thomas Helland
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
* IN THE SOFTWARE.
22
*/
23
24
#include "nir.h"
25
#include "nir_constant_expressions.h"
26
#include "nir_loop_analyze.h"
27
#include "util/bitset.h"
28
29
/* Classification assigned to each SSA def during loop analysis. */
typedef enum {
   undefined,       /* Not yet classified */
   invariant,       /* Known not to change across iterations of the loop */
   not_invariant,   /* Known to change within the loop */
   basic_induction  /* Basic induction variable: phi fed by a constant-step ALU op */
} nir_loop_variable_type;
35
36
/* Describes a basic induction variable: a phi with one source defined
 * outside the loop (the initial value) and one source that is an ALU op
 * stepping the variable each iteration.
 */
typedef struct nir_basic_induction_var {
   nir_alu_instr *alu;            /* The def of the alu-operation */
   nir_ssa_def *def_outside_loop; /* The phi-src outside the loop */
} nir_basic_induction_var;
40
41
/* Per-SSA-def bookkeeping used while analyzing a single loop. */
typedef struct {
   /* A link for the work list */
   struct list_head process_link;

   /* True if the def was seen inside the loop body (set by init_loop_def) */
   bool in_loop;

   /* The ssa_def associated with this info */
   nir_ssa_def *def;

   /* The type of this ssa_def */
   nir_loop_variable_type type;

   /* If this is of type basic_induction */
   struct nir_basic_induction_var *ind;

   /* True if variable is in an if branch */
   bool in_if_branch;

   /* True if variable is in a nested loop */
   bool in_nested_loop;

} nir_loop_variable;
63
64
/* Analysis context for a single loop. */
typedef struct {
   /* The loop we store information for */
   nir_loop *loop;

   /* Loop_variable for all ssa_defs in function */
   nir_loop_variable *loop_vars;
   /* Bitset tracking which loop_vars entries have been lazily initialized */
   BITSET_WORD *loop_vars_init;

   /* A list of the loop_vars to analyze */
   struct list_head process_list;

   /* Variable modes whose indirect access should force unrolling
    * (checked in force_unroll_array_access).
    */
   nir_variable_mode indirect_mask;

} loop_info_state;
78
79
static nir_loop_variable *
80
get_loop_var(nir_ssa_def *value, loop_info_state *state)
81
{
82
nir_loop_variable *var = &(state->loop_vars[value->index]);
83
84
if (!BITSET_TEST(state->loop_vars_init, value->index)) {
85
var->in_loop = false;
86
var->def = value;
87
var->in_if_branch = false;
88
var->in_nested_loop = false;
89
if (value->parent_instr->type == nir_instr_type_load_const)
90
var->type = invariant;
91
else
92
var->type = undefined;
93
94
BITSET_SET(state->loop_vars_init, value->index);
95
}
96
97
return var;
98
}
99
100
/* Context passed to init_loop_def() while walking the loop body. */
typedef struct {
   loop_info_state *state;
   /* True while visiting defs inside an if branch */
   bool in_if_branch;
   /* True while visiting defs inside a nested loop */
   bool in_nested_loop;
} init_loop_state;
105
106
static bool
107
init_loop_def(nir_ssa_def *def, void *void_init_loop_state)
108
{
109
init_loop_state *loop_init_state = void_init_loop_state;
110
nir_loop_variable *var = get_loop_var(def, loop_init_state->state);
111
112
if (loop_init_state->in_nested_loop) {
113
var->in_nested_loop = true;
114
} else if (loop_init_state->in_if_branch) {
115
var->in_if_branch = true;
116
} else {
117
/* Add to the tail of the list. That way we start at the beginning of
118
* the defs in the loop instead of the end when walking the list. This
119
* means less recursive calls. Only add defs that are not in nested
120
* loops or conditional blocks.
121
*/
122
list_addtail(&var->process_link, &loop_init_state->state->process_list);
123
}
124
125
var->in_loop = true;
126
127
return true;
128
}
129
130
/** Calculate an estimated cost in number of instructions
131
*
132
* We do this so that we don't unroll loops which will later get massively
133
* inflated due to int64 or fp64 lowering. The estimates provided here don't
134
* have to be massively accurate; they just have to be good enough that loop
135
* unrolling doesn't cause things to blow up too much.
136
*/
137
static unsigned
138
instr_cost(nir_instr *instr, const nir_shader_compiler_options *options)
139
{
140
if (instr->type == nir_instr_type_intrinsic ||
141
instr->type == nir_instr_type_tex)
142
return 1;
143
144
if (instr->type != nir_instr_type_alu)
145
return 0;
146
147
nir_alu_instr *alu = nir_instr_as_alu(instr);
148
const nir_op_info *info = &nir_op_infos[alu->op];
149
150
/* Assume everything 16 or 32-bit is cheap.
151
*
152
* There are no 64-bit ops that don't have a 64-bit thing as their
153
* destination or first source.
154
*/
155
if (nir_dest_bit_size(alu->dest.dest) < 64 &&
156
nir_src_bit_size(alu->src[0].src) < 64)
157
return 1;
158
159
bool is_fp64 = nir_dest_bit_size(alu->dest.dest) == 64 &&
160
nir_alu_type_get_base_type(info->output_type) == nir_type_float;
161
for (unsigned i = 0; i < info->num_inputs; i++) {
162
if (nir_src_bit_size(alu->src[i].src) == 64 &&
163
nir_alu_type_get_base_type(info->input_types[i]) == nir_type_float)
164
is_fp64 = true;
165
}
166
167
if (is_fp64) {
168
/* If it's something lowered normally, it's expensive. */
169
unsigned cost = 1;
170
if (options->lower_doubles_options &
171
nir_lower_doubles_op_to_options_mask(alu->op))
172
cost *= 20;
173
174
/* If it's full software, it's even more expensive */
175
if (options->lower_doubles_options & nir_lower_fp64_full_software)
176
cost *= 100;
177
178
return cost;
179
} else {
180
if (options->lower_int64_options &
181
nir_lower_int64_op_to_options_mask(alu->op)) {
182
/* These require a doing the division algorithm. */
183
if (alu->op == nir_op_idiv || alu->op == nir_op_udiv ||
184
alu->op == nir_op_imod || alu->op == nir_op_umod ||
185
alu->op == nir_op_irem)
186
return 100;
187
188
/* Other int64 lowering isn't usually all that expensive */
189
return 5;
190
}
191
192
return 1;
193
}
194
}
195
196
static bool
197
init_loop_block(nir_block *block, loop_info_state *state,
198
bool in_if_branch, bool in_nested_loop,
199
const nir_shader_compiler_options *options)
200
{
201
init_loop_state init_state = {.in_if_branch = in_if_branch,
202
.in_nested_loop = in_nested_loop,
203
.state = state };
204
205
nir_foreach_instr(instr, block) {
206
state->loop->info->instr_cost += instr_cost(instr, options);
207
nir_foreach_ssa_def(instr, init_loop_def, &init_state);
208
}
209
210
return true;
211
}
212
213
static inline bool
214
is_var_alu(nir_loop_variable *var)
215
{
216
return var->def->parent_instr->type == nir_instr_type_alu;
217
}
218
219
static inline bool
220
is_var_phi(nir_loop_variable *var)
221
{
222
return var->def->parent_instr->type == nir_instr_type_phi;
223
}
224
225
/* Recursively decide whether a def is loop-invariant, caching the verdict
 * in the variable's type. Returns true iff the def is invariant.
 */
static inline bool
mark_invariant(nir_ssa_def *def, loop_info_state *state)
{
   nir_loop_variable *var = get_loop_var(def, state);

   if (var->type == invariant)
      return true;

   /* Defs from outside the loop are invariant by definition. */
   if (!var->in_loop) {
      var->type = invariant;
      return true;
   }

   if (var->type == not_invariant)
      return false;

   if (is_var_alu(var)) {
      nir_alu_instr *alu = nir_instr_as_alu(def->parent_instr);

      /* An ALU result is invariant iff every one of its sources is. */
      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
         if (!mark_invariant(alu->src[i].src.ssa, state)) {
            var->type = not_invariant;
            return false;
         }
      }
      var->type = invariant;
      return true;
   }

   /* Phis shouldn't be invariant except if one operand is invariant, and the
    * other is the phi itself. These should be removed by opt_remove_phis.
    * load_consts are already set to invariant and constant during init,
    * and so should return earlier. Remaining op_codes are set undefined.
    */
   var->type = not_invariant;
   return false;
}
262
263
/* Walk the work list, classify each def, and remove proven-invariant defs
 * from the list so only induction-variable candidates remain for
 * compute_induction_information().
 */
static void
compute_invariance_information(loop_info_state *state)
{
   /* An expression is invariant in a loop L if:
    *  (base cases)
    *   – it’s a constant
    *   – it’s a variable use, all of whose single defs are outside of L
    *  (inductive cases)
    *   – it’s a pure computation all of whose args are loop invariant
    *   – it’s a variable use whose single reaching def, and the
    *     rhs of that def is loop-invariant
    */
   list_for_each_entry_safe(nir_loop_variable, var, &state->process_list,
                            process_link) {
      assert(!var->in_if_branch && !var->in_nested_loop);

      if (mark_invariant(var->def, state))
         list_del(&var->process_link);
   }
}
283
284
/* If all of the instruction sources point to identical ALU instructions (as
285
* per nir_instrs_equal), return one of the ALU instructions. Otherwise,
286
* return NULL.
287
*/
288
static nir_alu_instr *
289
phi_instr_as_alu(nir_phi_instr *phi)
290
{
291
nir_alu_instr *first = NULL;
292
nir_foreach_phi_src(src, phi) {
293
assert(src->src.is_ssa);
294
if (src->src.ssa->parent_instr->type != nir_instr_type_alu)
295
return NULL;
296
297
nir_alu_instr *alu = nir_instr_as_alu(src->src.ssa->parent_instr);
298
if (first == NULL) {
299
first = alu;
300
} else {
301
if (!nir_instrs_equal(&first->instr, &alu->instr))
302
return NULL;
303
}
304
}
305
306
return first;
307
}
308
309
static bool
310
alu_src_has_identity_swizzle(nir_alu_instr *alu, unsigned src_idx)
311
{
312
assert(nir_op_infos[alu->op].input_sizes[src_idx] == 0);
313
assert(alu->dest.dest.is_ssa);
314
for (unsigned i = 0; i < alu->dest.dest.ssa.num_components; i++) {
315
if (alu->src[src_idx].swizzle[i] != i)
316
return false;
317
}
318
319
return true;
320
}
321
322
/* Scan the remaining work-list entries for basic induction variables: a phi
 * with one constant source from outside the loop and one in-loop ALU source
 * that steps the phi by a constant. Returns true if at least one was found.
 */
static bool
compute_induction_information(loop_info_state *state)
{
   bool found_induction_var = false;
   list_for_each_entry_safe(nir_loop_variable, var, &state->process_list,
                            process_link) {

      /* It can't be an induction variable if it is invariant. Invariants and
       * things in nested loops or conditionals should have been removed from
       * the list by compute_invariance_information().
       */
      assert(!var->in_if_branch && !var->in_nested_loop &&
             var->type != invariant);

      /* We are only interested in checking phis for the basic induction
       * variable case as its simple to detect. All basic induction variables
       * have a phi node
       */
      if (!is_var_phi(var))
         continue;

      nir_phi_instr *phi = nir_instr_as_phi(var->def->parent_instr);
      nir_basic_induction_var *biv = rzalloc(state, nir_basic_induction_var);

      nir_loop_variable *alu_src_var = NULL;
      nir_foreach_phi_src(src, phi) {
         nir_loop_variable *src_var = get_loop_var(src->src.ssa, state);

         /* If one of the sources is in an if branch or nested loop then don't
          * attempt to go any further.
          */
         if (src_var->in_if_branch || src_var->in_nested_loop)
            break;

         /* Detect inductions variables that are incremented in both branches
          * of an unnested if rather than in a loop block.
          */
         if (is_var_phi(src_var)) {
            nir_phi_instr *src_phi =
               nir_instr_as_phi(src_var->def->parent_instr);
            nir_alu_instr *src_phi_alu = phi_instr_as_alu(src_phi);
            if (src_phi_alu) {
               src_var = get_loop_var(&src_phi_alu->dest.dest.ssa, state);
               if (!src_var->in_if_branch)
                  break;
            }
         }

         if (!src_var->in_loop && !biv->def_outside_loop) {
            /* First out-of-loop source: candidate initial value. */
            biv->def_outside_loop = src_var->def;
         } else if (is_var_alu(src_var) && !biv->alu) {
            alu_src_var = src_var;
            nir_alu_instr *alu = nir_instr_as_alu(src_var->def->parent_instr);

            if (nir_op_infos[alu->op].num_inputs == 2) {
               for (unsigned i = 0; i < 2; i++) {
                  /* Is one of the operands const, and the other the phi. The
                   * phi source can't be swizzled in any way.
                   */
                  if (nir_src_is_const(alu->src[i].src) &&
                      alu->src[1-i].src.ssa == &phi->dest.ssa &&
                      alu_src_has_identity_swizzle(alu, 1 - i))
                     biv->alu = alu;
               }
            }

            if (!biv->alu)
               break;
         } else {
            /* Any other source shape disqualifies this phi. */
            biv->alu = NULL;
            break;
         }
      }

      /* Only accept the candidate when both halves were found and the
       * initial value is a constant.
       */
      if (biv->alu && biv->def_outside_loop &&
          biv->def_outside_loop->parent_instr->type == nir_instr_type_load_const) {
         alu_src_var->type = basic_induction;
         alu_src_var->ind = biv;
         var->type = basic_induction;
         var->ind = biv;

         found_induction_var = true;
      } else {
         ralloc_free(biv);
      }
   }
   return found_induction_var;
}
410
411
/* Scan the top-level CF nodes of the loop body for "trivial" terminators:
 * an if whose then- or else-block ends in a break. Records each terminator
 * found. Returns false (and flags the loop as complex) when control flow is
 * too complicated to analyze; returns true iff at least one terminator was
 * recorded.
 */
static bool
find_loop_terminators(loop_info_state *state)
{
   bool success = false;
   foreach_list_typed_safe(nir_cf_node, node, node, &state->loop->body) {
      if (node->type == nir_cf_node_if) {
         nir_if *nif = nir_cf_node_as_if(node);

         nir_block *break_blk = NULL;
         nir_block *continue_from_blk = NULL;
         bool continue_from_then = true;

         /* Which side of the if (if any) breaks out of the loop? */
         nir_block *last_then = nir_if_last_then_block(nif);
         nir_block *last_else = nir_if_last_else_block(nif);
         if (nir_block_ends_in_break(last_then)) {
            break_blk = last_then;
            continue_from_blk = last_else;
            continue_from_then = false;
         } else if (nir_block_ends_in_break(last_else)) {
            break_blk = last_else;
            continue_from_blk = last_then;
         }

         /* If there is a break then we should find a terminator. If we can
          * not find a loop terminator, but there is a break-statement then
          * we should return false so that we do not try to find trip-count
          */
         if (!nir_is_trivial_loop_if(nif, break_blk)) {
            state->loop->info->complex_loop = true;
            return false;
         }

         /* Continue if the if contained no jumps at all */
         if (!break_blk)
            continue;

         /* A phi condition means the break decision depends on control flow
          * we can't reason about here, so treat the loop as complex.
          */
         if (nif->condition.ssa->parent_instr->type == nir_instr_type_phi) {
            state->loop->info->complex_loop = true;
            return false;
         }

         nir_loop_terminator *terminator =
            rzalloc(state->loop->info, nir_loop_terminator);

         list_addtail(&terminator->loop_terminator_link,
                      &state->loop->info->loop_terminator_list);

         terminator->nif = nif;
         terminator->break_block = break_blk;
         terminator->continue_from_block = continue_from_blk;
         terminator->continue_from_then = continue_from_then;
         terminator->conditional_instr = nif->condition.ssa->parent_instr;

         success = true;
      }
   }

   return success;
}
470
471
/* This function looks for an array access within a loop that uses an
 * induction variable for the array index. If found it returns the size of the
 * array, otherwise 0 is returned. If we find an induction var we pass it back
 * to the caller via array_index_out.
 */
static unsigned
find_array_access_via_induction(loop_info_state *state,
                                nir_deref_instr *deref,
                                nir_loop_variable **array_index_out)
{
   /* Walk up the deref chain looking for an array deref whose index is a
    * basic induction variable.
    */
   for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
      if (d->deref_type != nir_deref_type_array)
         continue;

      assert(d->arr.index.is_ssa);
      nir_loop_variable *array_index = get_loop_var(d->arr.index.ssa, state);

      if (array_index->type != basic_induction)
         continue;

      if (array_index_out)
         *array_index_out = array_index;

      /* The parent deref's type tells us the indexed container's length. */
      nir_deref_instr *parent = nir_deref_instr_parent(d);
      if (glsl_type_is_array_or_matrix(parent->type)) {
         return glsl_get_length(parent->type);
      } else {
         assert(glsl_type_is_vector(parent->type));
         return glsl_get_vector_elements(parent->type);
      }
   }

   return 0;
}
505
506
static bool
507
guess_loop_limit(loop_info_state *state, nir_const_value *limit_val,
508
nir_ssa_scalar basic_ind)
509
{
510
unsigned min_array_size = 0;
511
512
nir_foreach_block_in_cf_node(block, &state->loop->cf_node) {
513
nir_foreach_instr(instr, block) {
514
if (instr->type != nir_instr_type_intrinsic)
515
continue;
516
517
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
518
519
/* Check for arrays variably-indexed by a loop induction variable. */
520
if (intrin->intrinsic == nir_intrinsic_load_deref ||
521
intrin->intrinsic == nir_intrinsic_store_deref ||
522
intrin->intrinsic == nir_intrinsic_copy_deref) {
523
524
nir_loop_variable *array_idx = NULL;
525
unsigned array_size =
526
find_array_access_via_induction(state,
527
nir_src_as_deref(intrin->src[0]),
528
&array_idx);
529
if (array_idx && basic_ind.def == array_idx->def &&
530
(min_array_size == 0 || min_array_size > array_size)) {
531
/* Array indices are scalars */
532
assert(basic_ind.def->num_components == 1);
533
min_array_size = array_size;
534
}
535
536
if (intrin->intrinsic != nir_intrinsic_copy_deref)
537
continue;
538
539
array_size =
540
find_array_access_via_induction(state,
541
nir_src_as_deref(intrin->src[1]),
542
&array_idx);
543
if (array_idx && basic_ind.def == array_idx->def &&
544
(min_array_size == 0 || min_array_size > array_size)) {
545
/* Array indices are scalars */
546
assert(basic_ind.def->num_components == 1);
547
min_array_size = array_size;
548
}
549
}
550
}
551
}
552
553
if (min_array_size) {
554
*limit_val = nir_const_value_for_uint(min_array_size,
555
basic_ind.def->bit_size);
556
return true;
557
}
558
559
return false;
560
}
561
562
static bool
563
try_find_limit_of_alu(nir_ssa_scalar limit, nir_const_value *limit_val,
564
nir_loop_terminator *terminator, loop_info_state *state)
565
{
566
if (!nir_ssa_scalar_is_alu(limit))
567
return false;
568
569
nir_op limit_op = nir_ssa_scalar_alu_op(limit);
570
if (limit_op == nir_op_imin || limit_op == nir_op_fmin) {
571
for (unsigned i = 0; i < 2; i++) {
572
nir_ssa_scalar src = nir_ssa_scalar_chase_alu_src(limit, i);
573
if (nir_ssa_scalar_is_const(src)) {
574
*limit_val = nir_ssa_scalar_as_const_value(src);
575
terminator->exact_trip_count_unknown = true;
576
return true;
577
}
578
}
579
}
580
581
return false;
582
}
583
584
static nir_const_value
585
eval_const_unop(nir_op op, unsigned bit_size, nir_const_value src0,
586
unsigned execution_mode)
587
{
588
assert(nir_op_infos[op].num_inputs == 1);
589
nir_const_value dest;
590
nir_const_value *src[1] = { &src0 };
591
nir_eval_const_opcode(op, &dest, 1, bit_size, src, execution_mode);
592
return dest;
593
}
594
595
static nir_const_value
596
eval_const_binop(nir_op op, unsigned bit_size,
597
nir_const_value src0, nir_const_value src1,
598
unsigned execution_mode)
599
{
600
assert(nir_op_infos[op].num_inputs == 2);
601
nir_const_value dest;
602
nir_const_value *src[2] = { &src0, &src1 };
603
nir_eval_const_opcode(op, &dest, 1, bit_size, src, execution_mode);
604
return dest;
605
}
606
607
/* Compute the iteration count as (limit - initial) / step using constant
 * folding appropriate to the comparison opcode's type. Returns -1 for an
 * unhandled opcode or a count that doesn't fit in an int.
 */
static int32_t
get_iteration(nir_op cond_op, nir_const_value initial, nir_const_value step,
              nir_const_value limit, unsigned bit_size,
              unsigned execution_mode)
{
   nir_const_value span, iter;

   switch (cond_op) {
   case nir_op_ige:
   case nir_op_ilt:
   case nir_op_ieq:
   case nir_op_ine:
      /* Signed integer induction */
      span = eval_const_binop(nir_op_isub, bit_size, limit, initial,
                              execution_mode);
      iter = eval_const_binop(nir_op_idiv, bit_size, span, step,
                              execution_mode);
      break;

   case nir_op_uge:
   case nir_op_ult:
      /* Unsigned integer induction */
      span = eval_const_binop(nir_op_isub, bit_size, limit, initial,
                              execution_mode);
      iter = eval_const_binop(nir_op_udiv, bit_size, span, step,
                              execution_mode);
      break;

   case nir_op_fge:
   case nir_op_flt:
   case nir_op_feq:
   case nir_op_fneu:
      /* Float induction: divide then truncate to an integer count */
      span = eval_const_binop(nir_op_fsub, bit_size, limit, initial,
                              execution_mode);
      iter = eval_const_binop(nir_op_fdiv, bit_size, span,
                              step, execution_mode);
      iter = eval_const_unop(nir_op_f2i64, bit_size, iter, execution_mode);
      break;

   default:
      return -1;
   }

   uint64_t iter_u64 = nir_const_value_as_uint(iter, bit_size);
   return iter_u64 > INT_MAX ? -1 : (int)iter_u64;
}
651
652
/* Evaluate the loop exit condition with the initial induction value (stepped
 * once first when trip_offset == 1, i.e. a do-while style loop) to detect
 * loops that break before completing their first iteration.
 */
static bool
will_break_on_first_iteration(nir_const_value step,
                              nir_alu_type induction_base_type,
                              unsigned trip_offset,
                              nir_op cond_op, unsigned bit_size,
                              nir_const_value initial,
                              nir_const_value limit,
                              bool limit_rhs, bool invert_cond,
                              unsigned execution_mode)
{
   if (trip_offset == 1) {
      /* The induction variable is stepped before the condition is checked,
       * so advance the initial value by one step first.
       */
      nir_op add_op;
      switch (induction_base_type) {
      case nir_type_float:
         add_op = nir_op_fadd;
         break;
      case nir_type_int:
      case nir_type_uint:
         add_op = nir_op_iadd;
         break;
      default:
         unreachable("Unhandled induction variable base type!");
      }

      initial = eval_const_binop(add_op, bit_size, initial, step,
                                 execution_mode);
   }

   /* Place initial and limit on the correct sides of the comparison. */
   nir_const_value *src[2];
   src[limit_rhs ? 0 : 1] = &initial;
   src[limit_rhs ? 1 : 0] = &limit;

   /* Evaluate the loop exit condition */
   nir_const_value result;
   nir_eval_const_opcode(cond_op, &result, 1, bit_size, src, execution_mode);

   return invert_cond ? !result.b : result.b;
}
690
691
/* Check whether iter_int iterations of the induction variable satisfy the
 * loop exit condition (or its inverse when invert_cond is set).
 */
static bool
test_iterations(int32_t iter_int, nir_const_value step,
                nir_const_value limit, nir_op cond_op, unsigned bit_size,
                nir_alu_type induction_base_type,
                nir_const_value initial, bool limit_rhs, bool invert_cond,
                unsigned execution_mode)
{
   assert(nir_op_infos[cond_op].num_inputs == 2);

   nir_const_value iter_src;
   nir_op mul_op;
   nir_op add_op;
   switch (induction_base_type) {
   case nir_type_float:
      iter_src = nir_const_value_for_float(iter_int, bit_size);
      mul_op = nir_op_fmul;
      add_op = nir_op_fadd;
      break;
   case nir_type_int:
   case nir_type_uint:
      iter_src = nir_const_value_for_int(iter_int, bit_size);
      mul_op = nir_op_imul;
      add_op = nir_op_iadd;
      break;
   default:
      unreachable("Unhandled induction variable base type!");
   }

   /* Multiply the iteration count we are testing by the number of times we
    * step the induction variable each iteration.
    */
   nir_const_value mul_result =
      eval_const_binop(mul_op, bit_size, iter_src, step, execution_mode);

   /* Add the initial value to the accumulated induction variable total */
   nir_const_value add_result =
      eval_const_binop(add_op, bit_size, mul_result, initial, execution_mode);

   nir_const_value *src[2];
   src[limit_rhs ? 0 : 1] = &add_result;
   src[limit_rhs ? 1 : 0] = &limit;

   /* Evaluate the loop exit condition */
   nir_const_value result;
   nir_eval_const_opcode(cond_op, &result, 1, bit_size, src, execution_mode);

   return invert_cond ? !result.b : result.b;
}
739
740
/* Calculate the loop's iteration count from constant initial, step and limit
 * values. Returns -1 when the count cannot be determined, 0 when the loop
 * breaks before completing its first iteration.
 */
static int
calculate_iterations(nir_const_value initial, nir_const_value step,
                     nir_const_value limit, nir_alu_instr *alu,
                     nir_ssa_scalar cond, nir_op alu_op, bool limit_rhs,
                     bool invert_cond, unsigned execution_mode)
{
   /* nir_op_isub should have been lowered away by this point */
   assert(alu->op != nir_op_isub);

   /* Make sure the alu type for our induction variable is compatible with the
    * conditional alus input type. If its not something has gone really wrong.
    */
   nir_alu_type induction_base_type =
      nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type);
   if (induction_base_type == nir_type_int || induction_base_type == nir_type_uint) {
      assert(nir_alu_type_get_base_type(nir_op_infos[alu_op].input_types[1]) == nir_type_int ||
             nir_alu_type_get_base_type(nir_op_infos[alu_op].input_types[1]) == nir_type_uint);
   } else {
      assert(nir_alu_type_get_base_type(nir_op_infos[alu_op].input_types[0]) ==
             induction_base_type);
   }

   /* Check for unsupported alu operations */
   if (alu->op != nir_op_iadd && alu->op != nir_op_fadd)
      return -1;

   /* do-while loops can increment the starting value before the condition is
    * checked. e.g.
    *
    *    do {
    *        ndx++;
    *     } while (ndx < 3);
    *
    * Here we check if the induction variable is used directly by the loop
    * condition and if so we assume we need to step the initial value.
    */
   unsigned trip_offset = 0;
   nir_alu_instr *cond_alu = nir_instr_as_alu(cond.def->parent_instr);
   if (cond_alu->src[0].src.ssa == &alu->dest.dest.ssa ||
       cond_alu->src[1].src.ssa == &alu->dest.dest.ssa) {
      trip_offset = 1;
   }

   assert(nir_src_bit_size(alu->src[0].src) ==
          nir_src_bit_size(alu->src[1].src));
   unsigned bit_size = nir_src_bit_size(alu->src[0].src);

   /* get_iteration works under assumption that iterator will be
    * incremented or decremented until it hits the limit,
    * however if the loop condition is false on the first iteration
    * get_iteration's assumption is broken. Handle such loops first.
    */
   if (will_break_on_first_iteration(step, induction_base_type, trip_offset,
                                     alu_op, bit_size, initial,
                                     limit, limit_rhs, invert_cond,
                                     execution_mode)) {
      return 0;
   }

   int iter_int = get_iteration(alu_op, initial, step, limit, bit_size,
                                execution_mode);

   /* If iter_int is negative the loop is ill-formed or the conditional is
    * unsigned with a huge iteration count so don't bother going any further.
    */
   if (iter_int < 0)
      return -1;

   /* An explanation from the GLSL unrolling pass:
    *
    * Make sure that the calculated number of iterations satisfies the exit
    * condition. This is needed to catch off-by-one errors and some types of
    * ill-formed loops. For example, we need to detect that the following
    * loop does not have a maximum iteration count.
    *
    *    for (float x = 0.0; x != 0.9; x += 0.2);
    */
   for (int bias = -1; bias <= 1; bias++) {
      const int iter_bias = iter_int + bias;

      if (test_iterations(iter_bias, step, limit, alu_op, bit_size,
                          induction_base_type, initial,
                          limit_rhs, invert_cond, execution_mode)) {
         /* Undo the pre-step applied for do-while style loops. */
         return iter_bias > 0 ? iter_bias - trip_offset : iter_bias;
      }
   }

   return -1;
}
829
830
static nir_op
831
inverse_comparison(nir_op alu_op)
832
{
833
switch (alu_op) {
834
case nir_op_fge:
835
return nir_op_flt;
836
case nir_op_ige:
837
return nir_op_ilt;
838
case nir_op_uge:
839
return nir_op_ult;
840
case nir_op_flt:
841
return nir_op_fge;
842
case nir_op_ilt:
843
return nir_op_ige;
844
case nir_op_ult:
845
return nir_op_uge;
846
case nir_op_feq:
847
return nir_op_fneu;
848
case nir_op_ieq:
849
return nir_op_ine;
850
case nir_op_fneu:
851
return nir_op_feq;
852
case nir_op_ine:
853
return nir_op_ieq;
854
default:
855
unreachable("Unsuported comparison!");
856
}
857
}
858
859
static bool
860
is_supported_terminator_condition(nir_ssa_scalar cond)
861
{
862
if (!nir_ssa_scalar_is_alu(cond))
863
return false;
864
865
nir_alu_instr *alu = nir_instr_as_alu(cond.def->parent_instr);
866
return nir_alu_instr_is_comparison(alu) &&
867
nir_op_infos[alu->op].num_inputs == 2;
868
}
869
870
static bool
871
get_induction_and_limit_vars(nir_ssa_scalar cond,
872
nir_ssa_scalar *ind,
873
nir_ssa_scalar *limit,
874
bool *limit_rhs,
875
loop_info_state *state)
876
{
877
nir_ssa_scalar rhs, lhs;
878
lhs = nir_ssa_scalar_chase_alu_src(cond, 0);
879
rhs = nir_ssa_scalar_chase_alu_src(cond, 1);
880
881
if (get_loop_var(lhs.def, state)->type == basic_induction) {
882
*ind = lhs;
883
*limit = rhs;
884
*limit_rhs = true;
885
return true;
886
} else if (get_loop_var(rhs.def, state)->type == basic_induction) {
887
*ind = rhs;
888
*limit = lhs;
889
*limit_rhs = false;
890
return true;
891
} else {
892
return false;
893
}
894
}
895
896
/* Match loop-exit conditions of the form ((x && y) == 0) or !(x && y) and
 * try to extract an induction variable and limit from one of the iand's
 * sources. On success, *cond is redirected to the matched sub-comparison.
 */
static bool
try_find_trip_count_vars_in_iand(nir_ssa_scalar *cond,
                                 nir_ssa_scalar *ind,
                                 nir_ssa_scalar *limit,
                                 bool *limit_rhs,
                                 loop_info_state *state)
{
   const nir_op alu_op = nir_ssa_scalar_alu_op(*cond);
   assert(alu_op == nir_op_ieq || alu_op == nir_op_inot);

   nir_ssa_scalar iand = nir_ssa_scalar_chase_alu_src(*cond, 0);

   if (alu_op == nir_op_ieq) {
      nir_ssa_scalar zero = nir_ssa_scalar_chase_alu_src(*cond, 1);

      if (!nir_ssa_scalar_is_alu(iand) || !nir_ssa_scalar_is_const(zero)) {
         /* Maybe we had it the wrong way, flip things around */
         nir_ssa_scalar tmp = zero;
         zero = iand;
         iand = tmp;

         /* If we still didn't find what we need then return */
         if (!nir_ssa_scalar_is_const(zero))
            return false;
      }

      /* If the loop is not breaking on (x && y) == 0 then return */
      if (nir_ssa_scalar_as_uint(zero) != 0)
         return false;
   }

   if (!nir_ssa_scalar_is_alu(iand))
      return false;

   if (nir_ssa_scalar_alu_op(iand) != nir_op_iand)
      return false;

   /* Check if iand src is a terminator condition and try get induction var
    * and trip limit var.
    */
   bool found_induction_var = false;
   for (unsigned i = 0; i < 2; i++) {
      nir_ssa_scalar src = nir_ssa_scalar_chase_alu_src(iand, i);
      if (is_supported_terminator_condition(src) &&
          get_induction_and_limit_vars(src, ind, limit, limit_rhs, state)) {
         *cond = src;
         found_induction_var = true;

         /* If we've found one with a constant limit, stop. */
         if (nir_ssa_scalar_is_const(*limit))
            return true;
      }
   }

   return found_induction_var;
}
952
953
/* Run through each of the terminators of the loop and try to infer a possible
 * trip-count. We need to check them all, and set the lowest trip-count as the
 * trip-count of our loop. If one of the terminators has an undecidable
 * trip-count we can not safely assume anything about the duration of the
 * loop.
 */
static void
find_trip_count(loop_info_state *state, unsigned execution_mode)
{
   bool trip_count_known = true;
   bool guessed_trip_count = false;
   nir_loop_terminator *limiting_terminator = NULL;
   int max_trip_count = -1;

   list_for_each_entry(nir_loop_terminator, terminator,
                       &state->loop->info->loop_terminator_list,
                       loop_terminator_link) {
      assert(terminator->nif->condition.is_ssa);
      nir_ssa_scalar cond = { terminator->nif->condition.ssa, 0 };

      if (!nir_ssa_scalar_is_alu(cond)) {
         /* If we get here the loop is dead and will get cleaned up by the
          * nir_opt_dead_cf pass.
          */
         trip_count_known = false;
         continue;
      }

      nir_op alu_op = nir_ssa_scalar_alu_op(cond);

      bool limit_rhs;
      nir_ssa_scalar basic_ind = { NULL, 0 };
      nir_ssa_scalar limit;
      if ((alu_op == nir_op_inot || alu_op == nir_op_ieq) &&
          try_find_trip_count_vars_in_iand(&cond, &basic_ind, &limit,
                                           &limit_rhs, state)) {

         /* The loop is exiting on (x && y) == 0 so we need to get the
          * inverse of x or y (i.e. which ever contained the induction var) in
          * order to compute the trip count.
          */
         alu_op = inverse_comparison(nir_ssa_scalar_alu_op(cond));
         trip_count_known = false;
         terminator->exact_trip_count_unknown = true;
      }

      /* Fall back to checking the condition itself for an induction var. */
      if (!basic_ind.def) {
         if (is_supported_terminator_condition(cond)) {
            get_induction_and_limit_vars(cond, &basic_ind,
                                         &limit, &limit_rhs, state);
         }
      }

      /* The comparison has to have a basic induction variable for us to be
       * able to find trip counts.
       */
      if (!basic_ind.def) {
         trip_count_known = false;
         continue;
      }

      terminator->induction_rhs = !limit_rhs;

      /* Attempt to find a constant limit for the loop */
      nir_const_value limit_val;
      if (nir_ssa_scalar_is_const(limit)) {
         limit_val = nir_ssa_scalar_as_const_value(limit);
      } else {
         trip_count_known = false;

         if (!try_find_limit_of_alu(limit, &limit_val, terminator, state)) {
            /* Guess loop limit based on array access */
            if (!guess_loop_limit(state, &limit_val, basic_ind)) {
               continue;
            }

            guessed_trip_count = true;
         }
      }

      /* We have determined that we have the following constants:
       * (With the typical int i = 0; i < x; i++; as an example)
       *   - Upper limit.
       *   - Starting value
       *   - Step / iteration size
       * That's all that's needed to calculate the trip-count
       */

      nir_basic_induction_var *ind_var =
         get_loop_var(basic_ind.def, state)->ind;

      /* The basic induction var might be a vector but, because we guarantee
       * earlier that the phi source has a scalar swizzle, we can take the
       * component from basic_ind.
       */
      nir_ssa_scalar initial_s = { ind_var->def_outside_loop, basic_ind.comp };
      nir_ssa_scalar alu_s = { &ind_var->alu->dest.dest.ssa, basic_ind.comp };

      nir_const_value initial_val = nir_ssa_scalar_as_const_value(initial_s);

      /* We are guaranteed by earlier code that at least one of these sources
       * is a constant but we don't know which.
       */
      nir_const_value step_val;
      memset(&step_val, 0, sizeof(step_val));
      UNUSED bool found_step_value = false;
      assert(nir_op_infos[ind_var->alu->op].num_inputs == 2);
      for (unsigned i = 0; i < 2; i++) {
         nir_ssa_scalar alu_src = nir_ssa_scalar_chase_alu_src(alu_s, i);
         if (nir_ssa_scalar_is_const(alu_src)) {
            found_step_value = true;
            step_val = nir_ssa_scalar_as_const_value(alu_src);
            break;
         }
      }
      assert(found_step_value);

      int iterations = calculate_iterations(initial_val, step_val, limit_val,
                                            ind_var->alu, cond,
                                            alu_op, limit_rhs,
                                            terminator->continue_from_then,
                                            execution_mode);

      /* Were we not able to calculate the iteration count? */
      if (iterations == -1) {
         trip_count_known = false;
         guessed_trip_count = false;
         continue;
      }

      /* Guessed counts are tracked separately and never set max_trip_count. */
      if (guessed_trip_count) {
         guessed_trip_count = false;
         if (state->loop->info->guessed_trip_count == 0 ||
             state->loop->info->guessed_trip_count > iterations)
            state->loop->info->guessed_trip_count = iterations;

         continue;
      }

      /* If this is the first run or we have found a smaller amount of
       * iterations than previously (we have identified a more limiting
       * terminator) set the trip count and limiting terminator.
       */
      if (max_trip_count == -1 || iterations < max_trip_count) {
         max_trip_count = iterations;
         limiting_terminator = terminator;
      }
   }

   state->loop->info->exact_trip_count_known = trip_count_known;
   if (max_trip_count > -1)
      state->loop->info->max_trip_count = max_trip_count;
   state->loop->info->limiting_terminator = limiting_terminator;
}
static bool
1109
force_unroll_array_access(loop_info_state *state, nir_deref_instr *deref)
1110
{
1111
unsigned array_size = find_array_access_via_induction(state, deref, NULL);
1112
if (array_size) {
1113
if ((array_size == state->loop->info->max_trip_count) &&
1114
nir_deref_mode_must_be(deref, nir_var_shader_in |
1115
nir_var_shader_out |
1116
nir_var_shader_temp |
1117
nir_var_function_temp))
1118
return true;
1119
1120
if (nir_deref_mode_must_be(deref, state->indirect_mask))
1121
return true;
1122
}
1123
1124
return false;
1125
}
1126
1127
static bool
1128
force_unroll_heuristics(loop_info_state *state, nir_block *block)
1129
{
1130
nir_foreach_instr(instr, block) {
1131
if (instr->type != nir_instr_type_intrinsic)
1132
continue;
1133
1134
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1135
1136
/* Check for arrays variably-indexed by a loop induction variable.
1137
* Unrolling the loop may convert that access into constant-indexing.
1138
*/
1139
if (intrin->intrinsic == nir_intrinsic_load_deref ||
1140
intrin->intrinsic == nir_intrinsic_store_deref ||
1141
intrin->intrinsic == nir_intrinsic_copy_deref) {
1142
if (force_unroll_array_access(state,
1143
nir_src_as_deref(intrin->src[0])))
1144
return true;
1145
1146
if (intrin->intrinsic == nir_intrinsic_copy_deref &&
1147
force_unroll_array_access(state,
1148
nir_src_as_deref(intrin->src[1])))
1149
return true;
1150
}
1151
}
1152
1153
return false;
1154
}
1155
1156
/* Top-level analysis for a single loop: classify every value used in the
 * loop body, find the loop's simple terminators and induction variables,
 * estimate the trip count and decide whether unrolling should be forced.
 * All results are written into state->loop->info.
 */
static void
get_loop_info(loop_info_state *state, nir_function_impl *impl)
{
   nir_shader *shader = impl->function->shader;
   const nir_shader_compiler_options *options = shader->options;

   /* Add all entries in the outermost part of the loop to the processing list
    * Mark the entries in conditionals or in nested loops accordingly
    */
   foreach_list_typed_safe(nir_cf_node, node, node, &state->loop->body) {
      switch (node->type) {

      case nir_cf_node_block:
         init_loop_block(nir_cf_node_as_block(node), state,
                         false, false, options);
         break;

      case nir_cf_node_if:
         /* Defs inside either branch are marked as conditional. */
         nir_foreach_block_in_cf_node(block, node)
            init_loop_block(block, state, true, false, options);
         break;

      case nir_cf_node_loop:
         /* Defs inside a nested loop are marked as such. */
         nir_foreach_block_in_cf_node(block, node) {
            init_loop_block(block, state, false, true, options);
         }
         break;

      case nir_cf_node_function:
         /* A loop body never directly contains a function node. */
         break;
      }
   }

   /* Try to find all simple terminators of the loop. If we can't find any,
    * or we find possible terminators that have side effects then bail.
    */
   if (!find_loop_terminators(state)) {
      /* Partial terminator lists are useless to later passes, so free
       * whatever was collected before bailing out.
       */
      list_for_each_entry_safe(nir_loop_terminator, terminator,
                               &state->loop->info->loop_terminator_list,
                               loop_terminator_link) {
         list_del(&terminator->loop_terminator_link);
         ralloc_free(terminator);
      }
      return;
   }

   /* Induction analysis needs invariance information so get that first */
   compute_invariance_information(state);

   /* We have invariance information so try to find induction variables */
   if (!compute_induction_information(state))
      return;

   /* Run through each of the terminators and try to compute a trip-count */
   find_trip_count(state, impl->function->shader->info.float_controls_execution_mode);

   /* Finally, see whether any array access pattern in the loop makes
    * unrolling worthwhile regardless of size heuristics.
    */
   nir_foreach_block_in_cf_node(block, &state->loop->cf_node) {
      if (force_unroll_heuristics(state, block)) {
         state->loop->info->force_unroll = true;
         break;
      }
   }
}

static loop_info_state *
1221
initialize_loop_info_state(nir_loop *loop, void *mem_ctx,
1222
nir_function_impl *impl)
1223
{
1224
loop_info_state *state = rzalloc(mem_ctx, loop_info_state);
1225
state->loop_vars = ralloc_array(mem_ctx, nir_loop_variable,
1226
impl->ssa_alloc);
1227
state->loop_vars_init = rzalloc_array(mem_ctx, BITSET_WORD,
1228
BITSET_WORDS(impl->ssa_alloc));
1229
state->loop = loop;
1230
1231
list_inithead(&state->process_list);
1232
1233
if (loop->info)
1234
ralloc_free(loop->info);
1235
1236
loop->info = rzalloc(loop, nir_loop_info);
1237
1238
list_inithead(&loop->info->loop_terminator_list);
1239
1240
return state;
1241
}
1242
1243
static void
1244
process_loops(nir_cf_node *cf_node, nir_variable_mode indirect_mask)
1245
{
1246
switch (cf_node->type) {
1247
case nir_cf_node_block:
1248
return;
1249
case nir_cf_node_if: {
1250
nir_if *if_stmt = nir_cf_node_as_if(cf_node);
1251
foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->then_list)
1252
process_loops(nested_node, indirect_mask);
1253
foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->else_list)
1254
process_loops(nested_node, indirect_mask);
1255
return;
1256
}
1257
case nir_cf_node_loop: {
1258
nir_loop *loop = nir_cf_node_as_loop(cf_node);
1259
foreach_list_typed(nir_cf_node, nested_node, node, &loop->body)
1260
process_loops(nested_node, indirect_mask);
1261
break;
1262
}
1263
default:
1264
unreachable("unknown cf node type");
1265
}
1266
1267
nir_loop *loop = nir_cf_node_as_loop(cf_node);
1268
nir_function_impl *impl = nir_cf_node_get_function(cf_node);
1269
void *mem_ctx = ralloc_context(NULL);
1270
1271
loop_info_state *state = initialize_loop_info_state(loop, mem_ctx, impl);
1272
state->indirect_mask = indirect_mask;
1273
1274
get_loop_info(state, impl);
1275
1276
ralloc_free(mem_ctx);
1277
}
1278
1279
void
1280
nir_loop_analyze_impl(nir_function_impl *impl,
1281
nir_variable_mode indirect_mask)
1282
{
1283
nir_index_ssa_defs(impl);
1284
foreach_list_typed(nir_cf_node, node, node, &impl->body)
1285
process_loops(node, indirect_mask);
1286
}
1287
1288