GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/broadcom/compiler/vir_register_allocate.c
/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/ralloc.h"
#include "util/register_allocate.h"
#include "common/v3d_device_info.h"
#include "v3d_compiler.h"

#define QPU_R(i) { .magic = false, .index = i }

#define ACC_INDEX 0
#define ACC_COUNT 6
#define PHYS_INDEX (ACC_INDEX + ACC_COUNT)
#define PHYS_COUNT 64

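/* Register-allocator node space, as implied by the defines above: indices
 * [ACC_INDEX, ACC_INDEX + ACC_COUNT) are the accumulators r0-r5, and indices
 * [PHYS_INDEX, PHYS_INDEX + PHYS_COUNT) are the physical register file, so
 * e.g. index 6 is rf0 and index 69 is rf63.
 */
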
static inline bool
qinst_writes_tmu(const struct v3d_device_info *devinfo,
                 struct qinst *inst)
{
        return (inst->dst.file == QFILE_MAGIC &&
                v3d_qpu_magic_waddr_is_tmu(devinfo, inst->dst.index)) ||
               inst->qpu.sig.wrtmuc;
}

static bool
is_end_of_tmu_sequence(const struct v3d_device_info *devinfo,
                       struct qinst *inst, struct qblock *block)
{
        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
            inst->qpu.alu.add.op == V3D_QPU_A_TMUWT) {
                return true;
        }

        if (!inst->qpu.sig.ldtmu)
                return false;

        list_for_each_entry_from(struct qinst, scan_inst, inst->link.next,
                                 &block->instructions, link) {
                if (scan_inst->qpu.sig.ldtmu)
                        return false;

                if (scan_inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                    scan_inst->qpu.alu.add.op == V3D_QPU_A_TMUWT) {
                        return true;
                }

                if (qinst_writes_tmu(devinfo, scan_inst))
                        return true;
        }

        return true;
}

static bool
vir_is_mov_uniform(struct v3d_compile *c, int temp)
{
        struct qinst *def = c->defs[temp];

        return def && def->qpu.sig.ldunif;
}

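/* Spill-cost heuristic, in brief: every use/def of a temp adds to its cost.
 * A temp that is just a ldunif result only costs block_scale per use, since
 * spilling it means rematerializing the uniform load rather than going
 * through scratch memory.  Other temps are scaled by tmu_scale (5), plus an
 * extra factor of 3 for uses inside a TMU sequence, so e.g. a single read
 * inside a TMU operation contributes 1 * 5 * 3 = 15.  Cheap, rematerializable
 * uniform temps therefore tend to be chosen as spill candidates first.
 */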
static int
v3d_choose_spill_node(struct v3d_compile *c, struct ra_graph *g,
                      uint32_t *temp_to_node)
{
        const float tmu_scale = 5;
        float block_scale = 1.0;
        float spill_costs[c->num_temps];
        bool in_tmu_operation = false;
        bool started_last_seg = false;

        for (unsigned i = 0; i < c->num_temps; i++)
                spill_costs[i] = 0.0;

        /* XXX: Scale the cost up when inside of a loop. */
        vir_for_each_block(block, c) {
                vir_for_each_inst(inst, block) {
                        /* We can't insert new thread switches after
                         * starting output writes.
                         */
                        bool no_spilling =
                                c->threads > 1 && started_last_seg;

                        /* Discourage spilling of TMU operations */
                        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                                if (inst->src[i].file != QFILE_TEMP)
                                        continue;

                                int temp = inst->src[i].index;
                                if (vir_is_mov_uniform(c, temp)) {
                                        spill_costs[temp] += block_scale;
                                } else if (!no_spilling) {
                                        float tmu_op_scale = in_tmu_operation ?
                                                3.0 : 1.0;
                                        spill_costs[temp] += (block_scale *
                                                              tmu_scale *
                                                              tmu_op_scale);
                                } else {
                                        BITSET_CLEAR(c->spillable, temp);
                                }
                        }

                        if (inst->dst.file == QFILE_TEMP) {
                                int temp = inst->dst.index;

                                if (vir_is_mov_uniform(c, temp)) {
                                        /* We just rematerialize the uniform
                                         * later.
                                         */
                                } else if (!no_spilling) {
                                        spill_costs[temp] += (block_scale *
                                                              tmu_scale);
                                } else {
                                        BITSET_CLEAR(c->spillable, temp);
                                }
                        }

                        /* Refuse to spill a ldvary's dst, because that means
                         * that ldvary's r5 would end up being used across a
                         * thrsw.
                         */
                        if (inst->qpu.sig.ldvary) {
                                assert(inst->dst.file == QFILE_TEMP);
                                BITSET_CLEAR(c->spillable, inst->dst.index);
                        }

                        if (inst->is_last_thrsw)
                                started_last_seg = true;

                        if (v3d_qpu_writes_vpm(&inst->qpu) ||
                            v3d_qpu_uses_tlb(&inst->qpu))
                                started_last_seg = true;

                        /* Track when we're in between a TMU setup and the
                         * final LDTMU or TMUWT from that TMU setup.  We
                         * penalize spills during that time.
                         */
                        if (is_end_of_tmu_sequence(c->devinfo, inst, block))
                                in_tmu_operation = false;

                        if (qinst_writes_tmu(c->devinfo, inst))
                                in_tmu_operation = true;
                }
        }

        for (unsigned i = 0; i < c->num_temps; i++) {
                if (BITSET_TEST(c->spillable, i))
                        ra_set_node_spill_cost(g, temp_to_node[i], spill_costs[i]);
        }

        return ra_get_best_spill_node(g);
}

/* The spill offset for this thread takes a bit of setup, so do it once at
 * program start.
 */
void
v3d_setup_spill_base(struct v3d_compile *c)
{
        c->cursor = vir_before_block(vir_entry_block(c));

        int start_num_temps = c->num_temps;

        /* Each thread wants to be in a separate region of the scratch space
         * so that the QPUs aren't fighting over cache lines.  We have the
         * driver keep a single global spill BO rather than
         * per-spilling-program BOs, so we need a uniform from the driver for
         * what the per-thread scale is.
         */
        struct qreg thread_offset =
                vir_UMUL(c,
                         vir_TIDX(c),
                         vir_uniform(c, QUNIFORM_SPILL_SIZE_PER_THREAD, 0));

        /* Each channel in a reg is 4 bytes, so scale them up by that. */
        struct qreg element_offset = vir_SHL(c, vir_EIDX(c),
                                             vir_uniform_ui(c, 2));

        c->spill_base = vir_ADD(c,
                                vir_ADD(c, thread_offset, element_offset),
                                vir_uniform(c, QUNIFORM_SPILL_OFFSET, 0));

        /* Make sure that we don't spill the spilling setup instructions. */
        for (int i = start_num_temps; i < c->num_temps; i++)
                BITSET_CLEAR(c->spillable, i);

        c->cursor = vir_after_block(c->cur_block);
}

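/* In other words, for each SIMD element of each hardware thread, the base
 * scratch address computed above works out to:
 *
 *     spill_base = TIDX * SPILL_SIZE_PER_THREAD + EIDX * 4 + SPILL_OFFSET
 *
 * and v3d_emit_spill_tmua() below then adds the per-spill byte offset on top
 * of that when it writes the TMU address.
 */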
static void
v3d_emit_spill_tmua(struct v3d_compile *c, uint32_t spill_offset)
{
        vir_ADD_dest(c, vir_reg(QFILE_MAGIC,
                                V3D_QPU_WADDR_TMUA),
                     c->spill_base,
                     vir_uniform_ui(c, spill_offset));
}


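/* Spilling a def through the TMU: move the value to TMUD, write the scratch
 * address to TMUA, then thread-switch and TMUWT so the write completes before
 * the register can be reused.
 */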
static void
v3d_emit_tmu_spill(struct v3d_compile *c, struct qinst *inst,
                   struct qinst *position, uint32_t spill_offset)
{
        c->cursor = vir_after_inst(position);
        inst->dst = vir_get_temp(c);
        vir_MOV_dest(c, vir_reg(QFILE_MAGIC,
                                V3D_QPU_WADDR_TMUD),
                     inst->dst);
        v3d_emit_spill_tmua(c, spill_offset);
        vir_emit_thrsw(c);
        vir_TMUWT(c);
        c->spills++;
        c->tmu_dirty_rcl = true;
}

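/* Rewrites every def and use of spill_temp.  Uniform temps are handled by
 * rematerializing the ldunif at each use and dropping the original def;
 * everything else goes through TMU scratch stores (at defs) and loads (at
 * uses), taking care not to split an in-flight TMU sequence.
 */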
static void
v3d_spill_reg(struct v3d_compile *c, int spill_temp)
{
        c->spill_count++;

        bool is_uniform = vir_is_mov_uniform(c, spill_temp);

        uint32_t spill_offset = 0;

        if (!is_uniform) {
                spill_offset = c->spill_size;
                c->spill_size += V3D_CHANNELS * sizeof(uint32_t);

                if (spill_offset == 0)
                        v3d_setup_spill_base(c);
        }

        struct qinst *last_thrsw = c->last_thrsw;
        assert(!last_thrsw || last_thrsw->is_last_thrsw);

        int start_num_temps = c->num_temps;

        int uniform_index = ~0;
        if (is_uniform) {
                struct qinst *orig_unif = c->defs[spill_temp];
                uniform_index = orig_unif->uniform;
        }

        /* We must disable the ldunif optimization if we are spilling uniforms */
        bool had_disable_ldunif_opt = c->disable_ldunif_opt;
        c->disable_ldunif_opt = true;

        struct qinst *start_of_tmu_sequence = NULL;
        struct qinst *postponed_spill = NULL;
        vir_for_each_block(block, c) {
                vir_for_each_inst_safe(inst, block) {
                        /* Track when we're in between a TMU setup and the final
                         * LDTMU or TMUWT from that TMU setup.  We can't spill/fill any
                         * temps during that time, because that involves inserting a
                         * new TMU setup/LDTMU sequence, so we postpone the spill or
                         * move the fill up to not intrude in the middle of the TMU
                         * sequence.
                         */
                        if (is_end_of_tmu_sequence(c->devinfo, inst, block)) {
                                if (postponed_spill) {
                                        v3d_emit_tmu_spill(c, postponed_spill,
                                                           inst, spill_offset);
                                }

                                start_of_tmu_sequence = NULL;
                                postponed_spill = NULL;
                        }

                        if (!start_of_tmu_sequence &&
                            qinst_writes_tmu(c->devinfo, inst)) {
                                start_of_tmu_sequence = inst;
                        }

                        /* fills */
                        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                                if (inst->src[i].file != QFILE_TEMP ||
                                    inst->src[i].index != spill_temp) {
                                        continue;
                                }

                                c->cursor = vir_before_inst(inst);

                                if (is_uniform) {
                                        struct qreg unif =
                                                vir_uniform(c,
                                                            c->uniform_contents[uniform_index],
                                                            c->uniform_data[uniform_index]);
                                        inst->src[i] = unif;
                                } else {
                                        /* If we have a postponed spill, we don't need
                                         * a fill as the temp would not have been
                                         * spilled yet.
                                         */
                                        if (postponed_spill)
                                                continue;
                                        if (start_of_tmu_sequence)
                                                c->cursor = vir_before_inst(start_of_tmu_sequence);

                                        v3d_emit_spill_tmua(c, spill_offset);
                                        vir_emit_thrsw(c);
                                        inst->src[i] = vir_LDTMU(c);
                                        c->fills++;
                                }
                        }

                        /* spills */
                        if (inst->dst.file == QFILE_TEMP &&
                            inst->dst.index == spill_temp) {
                                if (is_uniform) {
                                        c->cursor.link = NULL;
                                        vir_remove_instruction(c, inst);
                                } else {
                                        if (start_of_tmu_sequence)
                                                postponed_spill = inst;
                                        else
                                                v3d_emit_tmu_spill(c, inst, inst,
                                                                   spill_offset);
                                }
                        }

                        /* If we didn't have a last-thrsw inserted by nir_to_vir and
                         * we've been inserting thrsws, then insert a new last_thrsw
                         * right before we start the vpm/tlb sequence for the last
                         * thread segment.
                         */
                        if (!is_uniform && !last_thrsw && c->last_thrsw &&
                            (v3d_qpu_writes_vpm(&inst->qpu) ||
                             v3d_qpu_uses_tlb(&inst->qpu))) {
                                c->cursor = vir_before_inst(inst);
                                vir_emit_thrsw(c);

                                last_thrsw = c->last_thrsw;
                                last_thrsw->is_last_thrsw = true;
                        }
                }
        }

        /* Make sure c->last_thrsw is the actual last thrsw, not just one we
         * inserted in our most recent unspill.
         */
        if (last_thrsw)
                c->last_thrsw = last_thrsw;

        /* Don't allow spilling of our spilling instructions.  There's no way
         * they can help get things colored.
         */
        for (int i = start_num_temps; i < c->num_temps; i++)
                BITSET_CLEAR(c->spillable, i);

        c->disable_ldunif_opt = had_disable_ldunif_opt;
}

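/* priority is the length of the temp's live interval (temp_end - temp_start);
 * the select callback below uses it to steer short-lived temps toward
 * accumulators.
 */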
struct node_to_temp_map {
        uint32_t temp;
        uint32_t priority;
};

struct v3d_ra_select_callback_data {
        uint32_t next_acc;
        uint32_t next_phys;
        struct node_to_temp_map *map;
};

/* Choosing accumulators improves chances of merging QPU instructions
 * due to these merges requiring that at most 2 rf registers are used
 * by the add and mul instructions.
 */
static bool
v3d_ra_favor_accum(struct v3d_ra_select_callback_data *v3d_ra,
                   BITSET_WORD *regs,
                   int priority)
{
        /* Favor accumulators if we have fewer than this number of physical
         * registers.  Accumulators have more restrictions (like being
         * invalidated through thrsw), so running out of physical registers
         * even if we have accumulators available can lead to register
         * allocation failures.
         */
        static const int available_rf_threshold = 5;
        int available_rf = 0;
        for (int i = 0; i < PHYS_COUNT; i++) {
                if (BITSET_TEST(regs, PHYS_INDEX + i))
                        available_rf++;
                if (available_rf >= available_rf_threshold)
                        break;
        }
        if (available_rf < available_rf_threshold)
                return true;

        /* Favor accumulators for short-lived temps (our priority represents
         * liveness), to prevent long-lived temps from grabbing accumulators
         * and preventing follow-up instructions from using them, potentially
         * leading to large portions of the shader being unable to use
         * accumulators and therefore merge instructions successfully.
         */
        static const int priority_threshold = 20;
        if (priority <= priority_threshold)
                return true;

        return false;
}

static bool
v3d_ra_select_accum(struct v3d_ra_select_callback_data *v3d_ra,
                    BITSET_WORD *regs,
                    unsigned int *out)
{
        /* Round-robin through our accumulators to give post-RA instruction
         * selection more options.
         */
        for (int i = 0; i < ACC_COUNT; i++) {
                int acc_off = (v3d_ra->next_acc + i) % ACC_COUNT;
                int acc = ACC_INDEX + acc_off;

                if (BITSET_TEST(regs, acc)) {
                        v3d_ra->next_acc = acc_off + 1;
                        *out = acc;
                        return true;
                }
        }

        return false;
}

static bool
v3d_ra_select_rf(struct v3d_ra_select_callback_data *v3d_ra,
                 BITSET_WORD *regs,
                 unsigned int *out)
{
        for (int i = 0; i < PHYS_COUNT; i++) {
                int phys_off = (v3d_ra->next_phys + i) % PHYS_COUNT;
                int phys = PHYS_INDEX + phys_off;

                if (BITSET_TEST(regs, phys)) {
                        v3d_ra->next_phys = phys_off + 1;
                        *out = phys;
                        return true;
                }
        }

        return false;
}

static unsigned int
v3d_ra_select_callback(unsigned int n, BITSET_WORD *regs, void *data)
{
        struct v3d_ra_select_callback_data *v3d_ra = data;
        int r5 = ACC_INDEX + 5;

        /* Choose r5 for our ldunifs if possible (nobody else can load to that
         * reg, and it keeps the QPU cond field free from being occupied by
         * ldunifrf).
         */
        if (BITSET_TEST(regs, r5))
                return r5;

        unsigned int reg;
        if (v3d_ra_favor_accum(v3d_ra, regs, v3d_ra->map[n].priority) &&
            v3d_ra_select_accum(v3d_ra, regs, &reg)) {
                return reg;
        }

        if (v3d_ra_select_rf(v3d_ra, regs, &reg))
                return reg;

        /* If we ran out of physical registers try to assign an accumulator
         * if we didn't favor that option earlier.
         */
        if (v3d_ra_select_accum(v3d_ra, regs, &reg))
                return reg;

        unreachable("RA must pass us at least one possible reg.");
}

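/* Example of the per-thread classes set up below: at thread index 0 the
 * "phys" classes cover rf0-rf63 (PHYS_COUNT >> 0), at index 1 rf0-rf31, and
 * at index 2 rf0-rf15, matching how the physical register file is divided up
 * as fragment shader threading increases.
 */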
bool
vir_init_reg_sets(struct v3d_compiler *compiler)
{
        /* Allocate up to 3 regfile classes, for the ways the physical
         * register file can be divided up for fragment shader threading.
         */
        int max_thread_index = (compiler->devinfo->ver >= 40 ? 2 : 3);

        compiler->regs = ra_alloc_reg_set(compiler, PHYS_INDEX + PHYS_COUNT,
                                          false);
        if (!compiler->regs)
                return false;

        for (int threads = 0; threads < max_thread_index; threads++) {
                compiler->reg_class_any[threads] =
                        ra_alloc_contig_reg_class(compiler->regs, 1);
                compiler->reg_class_r5[threads] =
                        ra_alloc_contig_reg_class(compiler->regs, 1);
                compiler->reg_class_phys_or_acc[threads] =
                        ra_alloc_contig_reg_class(compiler->regs, 1);
                compiler->reg_class_phys[threads] =
                        ra_alloc_contig_reg_class(compiler->regs, 1);

                for (int i = PHYS_INDEX;
                     i < PHYS_INDEX + (PHYS_COUNT >> threads); i++) {
                        ra_class_add_reg(compiler->reg_class_phys_or_acc[threads], i);
                        ra_class_add_reg(compiler->reg_class_phys[threads], i);
                        ra_class_add_reg(compiler->reg_class_any[threads], i);
                }

                for (int i = ACC_INDEX + 0; i < ACC_INDEX + ACC_COUNT - 1; i++) {
                        ra_class_add_reg(compiler->reg_class_phys_or_acc[threads], i);
                        ra_class_add_reg(compiler->reg_class_any[threads], i);
                }
                /* r5 can only store a single 32-bit value, so not much can
                 * use it.
                 */
                ra_class_add_reg(compiler->reg_class_r5[threads],
                                 ACC_INDEX + 5);
                ra_class_add_reg(compiler->reg_class_any[threads],
                                 ACC_INDEX + 5);
        }

        ra_set_finalize(compiler->regs, NULL);

        return true;
}

static int
node_to_temp_priority(const void *in_a, const void *in_b)
{
        const struct node_to_temp_map *a = in_a;
        const struct node_to_temp_map *b = in_b;

        return a->priority - b->priority;
}

/**
 * Computes the number of registers to spill in a batch after a register
 * allocation failure.
 */
static uint32_t
get_spill_batch_size(struct v3d_compile *c)
{
        /* Allow up to 10 spills in batches of 1 in any case to avoid any
         * chance of over-spilling if the program requires few spills to
         * compile.
         */
        if (c->spill_count < 10)
                return 1;

        /* If we have to spill more than that we assume performance is not
         * going to be great and we shift focus to batching spills to cut
         * down compile time at the expense of over-spilling.
         */
        return 20;
}

/* Don't emit spills using the TMU until we've dropped thread count first.  We
 * may also disable spilling when certain optimizations that are known to
 * increase register pressure are active, so that we favor recompiling with
 * optimizations disabled instead of spilling.
 */
static inline bool
tmu_spilling_allowed(struct v3d_compile *c, int thread_index)
{
        return thread_index == 0 && c->tmu_spilling_allowed;
}

#define CLASS_BIT_PHYS (1 << 0)
#define CLASS_BIT_ACC (1 << 1)
#define CLASS_BIT_R5 (1 << 4)
#define CLASS_BITS_ANY (CLASS_BIT_PHYS | \
                        CLASS_BIT_ACC | \
                        CLASS_BIT_R5)

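/* class_bits[] below starts at CLASS_BITS_ANY for every temp and gets masked
 * down as instructions impose constraints; the final value picks the RA
 * class: PHYS only -> reg_class_phys, PHYS|ACC -> reg_class_phys_or_acc,
 * R5 only -> reg_class_r5, and an unconstrained temp stays in reg_class_any.
 */
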
/**
 * Returns a mapping from QFILE_TEMP indices to struct qpu_regs.
 *
 * The return value should be freed by the caller.
 */
struct qpu_reg *
v3d_register_allocate(struct v3d_compile *c, bool *spilled)
{
        uint32_t UNUSED start_num_temps = c->num_temps;
        struct node_to_temp_map map[c->num_temps];
        uint32_t temp_to_node[c->num_temps];
        uint8_t class_bits[c->num_temps];
        int acc_nodes[ACC_COUNT];
        struct v3d_ra_select_callback_data callback_data = {
                .next_acc = 0,
                /* Start at RF3, to try to keep the TLB writes from using
                 * RF0-2.
                 */
                .next_phys = 3,
                .map = map,
        };

        *spilled = false;

        vir_calculate_live_intervals(c);

        /* Convert 1, 2, 4 threads to 0, 1, 2 index.
         *
         * V3D 4.x has double the physical register space, so 64 physical regs
         * are available at both 1x and 2x threading, and 4x has 32.
         */
        int thread_index = ffs(c->threads) - 1;
        if (c->devinfo->ver >= 40) {
                if (thread_index >= 1)
                        thread_index--;
        }

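        /* E.g. c->threads == 4 gives ffs(4) - 1 == 2; on V3D 4.x that is then
         * clamped down to index 1, which selects the 32-register classes set
         * up in vir_init_reg_sets().
         */
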
        struct ra_graph *g = ra_alloc_interference_graph(c->compiler->regs,
                                                          c->num_temps +
                                                          ARRAY_SIZE(acc_nodes));
        ra_set_select_reg_callback(g, v3d_ra_select_callback, &callback_data);

        /* Make some fixed nodes for the accumulators, which we will need to
         * interfere with when ops have implied r3/r4 writes or for the thread
         * switches.  We could represent these as classes for the nodes to
         * live in, but the classes take up a lot of memory to set up, so we
         * don't want to make too many.
         */
        for (int i = 0; i < ARRAY_SIZE(acc_nodes); i++) {
                acc_nodes[i] = c->num_temps + i;
                ra_set_node_reg(g, acc_nodes[i], ACC_INDEX + i);
        }

        for (uint32_t i = 0; i < c->num_temps; i++) {
                map[i].temp = i;
                map[i].priority = c->temp_end[i] - c->temp_start[i];
        }
        qsort(map, c->num_temps, sizeof(map[0]), node_to_temp_priority);
        for (uint32_t i = 0; i < c->num_temps; i++) {
                temp_to_node[map[i].temp] = i;
        }

        /* Figure out our register classes and preallocated registers.  We
         * start with any temp being able to be in any file, then instructions
         * incrementally remove bits that the temp definitely can't be in.
         */
        memset(class_bits, CLASS_BITS_ANY, sizeof(class_bits));

        int ip = 0;
        vir_for_each_inst_inorder(inst, c) {
                /* If the instruction writes r3/r4 (and optionally moves its
                 * result to a temp), nothing else can be stored in r3/r4 across
                 * it.
                 */
                if (vir_writes_r3(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[3]);
                                }
                        }
                }
                if (vir_writes_r4(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[4]);
                                }
                        }
                }

                if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                        switch (inst->qpu.alu.add.op) {
                        case V3D_QPU_A_LDVPMV_IN:
                        case V3D_QPU_A_LDVPMV_OUT:
                        case V3D_QPU_A_LDVPMD_IN:
                        case V3D_QPU_A_LDVPMD_OUT:
                        case V3D_QPU_A_LDVPMP:
                        case V3D_QPU_A_LDVPMG_IN:
                        case V3D_QPU_A_LDVPMG_OUT:
                                /* LDVPMs only store to temps (the MA flag
                                 * decides whether the LDVPM is in or out)
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        case V3D_QPU_A_RECIP:
                        case V3D_QPU_A_RSQRT:
                        case V3D_QPU_A_EXP:
                        case V3D_QPU_A_LOG:
                        case V3D_QPU_A_SIN:
                        case V3D_QPU_A_RSQRT2:
                                /* The SFU instructions write directly to the
                                 * phys regfile.
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        default:
                                break;
                        }
                }

                if (inst->src[0].file == QFILE_REG) {
                        switch (inst->src[0].index) {
                        case 0:
                        case 1:
                        case 2:
                        case 3:
                                /* Payload setup instructions: Force allocate
                                 * the dst to the given register (so the MOV
                                 * will disappear).
                                 */
                                assert(inst->qpu.alu.mul.op == V3D_QPU_M_MOV);
                                assert(inst->dst.file == QFILE_TEMP);
                                ra_set_node_reg(g,
                                                temp_to_node[inst->dst.index],
                                                PHYS_INDEX +
                                                inst->src[0].index);
                                break;
                        }
                }

                if (inst->dst.file == QFILE_TEMP) {
                        /* Only a ldunif gets to write to R5, which only has a
                         * single 32-bit channel of storage.
                         */
                        if (!inst->qpu.sig.ldunif) {
                                class_bits[inst->dst.index] &= ~CLASS_BIT_R5;
                        } else {
                                /* Until V3D 4.x, we could only load a uniform
                                 * to r5, so we'll need to spill if uniform
                                 * loads interfere with each other.
                                 */
                                if (c->devinfo->ver < 40) {
                                        class_bits[inst->dst.index] &=
                                                CLASS_BIT_R5;
                                }
                        }
                }

                if (inst->qpu.sig.thrsw) {
                        /* All accumulators are invalidated across a thread
                         * switch.
                         */
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip && c->temp_end[i] > ip)
                                        class_bits[i] &= CLASS_BIT_PHYS;
                        }
                }

                ip++;
        }

        for (uint32_t i = 0; i < c->num_temps; i++) {
                if (class_bits[i] == CLASS_BIT_PHYS) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys[thread_index]);
                } else if (class_bits[i] == (CLASS_BIT_R5)) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_r5[thread_index]);
                } else if (class_bits[i] == (CLASS_BIT_PHYS | CLASS_BIT_ACC)) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys_or_acc[thread_index]);
                } else {
                        assert(class_bits[i] == CLASS_BITS_ANY);
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_any[thread_index]);
                }
        }

        for (uint32_t i = 0; i < c->num_temps; i++) {
                for (uint32_t j = i + 1; j < c->num_temps; j++) {
                        if (!(c->temp_start[i] >= c->temp_end[j] ||
                              c->temp_start[j] >= c->temp_end[i])) {
                                ra_add_node_interference(g,
                                                         temp_to_node[i],
                                                         temp_to_node[j]);
                        }
                }
        }

        /* Debug code to force a bit of register spilling, for running across
         * conformance tests to make sure that spilling works.
         */
        int force_register_spills = 0;
        if (c->spill_size <
            V3D_CHANNELS * sizeof(uint32_t) * force_register_spills) {
                int node = v3d_choose_spill_node(c, g, temp_to_node);
                if (node != -1) {
                        v3d_spill_reg(c, map[node].temp);
                        ralloc_free(g);
                        *spilled = true;
                        return NULL;
                }
        }

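        /* If allocation fails we pick spill candidates and return NULL with
         * *spilled set so the caller can retry register allocation on the
         * rewritten program: cheap uniform rematerializations can be batched
         * (up to get_spill_batch_size()), but a TMU spill injects a thrsw
         * that invalidates accumulators, so only one of those is done per
         * attempt.
         */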
        bool ok = ra_allocate(g);
        if (!ok) {
                const uint32_t spill_batch_size = get_spill_batch_size(c);

                for (uint32_t i = 0; i < spill_batch_size; i++) {
                        int node = v3d_choose_spill_node(c, g, temp_to_node);
                        if (node == -1)
                                break;

                        /* TMU spills inject thrsw signals that invalidate
                         * accumulators, so we can't batch them.
                         */
                        bool is_uniform = vir_is_mov_uniform(c, map[node].temp);
                        if (i > 0 && !is_uniform)
                                break;

                        if (is_uniform || tmu_spilling_allowed(c, thread_index)) {
                                v3d_spill_reg(c, map[node].temp);

                                /* Ask the outer loop to call back in. */
                                *spilled = true;

                                /* See comment above about batching TMU spills.
                                 */
                                if (!is_uniform) {
                                        assert(i == 0);
                                        break;
                                }
                        } else {
                                break;
                        }
                }

                ralloc_free(g);
                return NULL;
        }

        /* Ensure that we are not accessing temp_to_node out of bounds.  We
         * should never trigger this assertion because `c->num_temps` only
         * grows when we spill, in which case we return early and don't get
         * here.
         */
        assert(start_num_temps == c->num_temps);
        struct qpu_reg *temp_registers = calloc(c->num_temps,
                                                sizeof(*temp_registers));

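        /* Translate RA's node numbering back into QPU registers: nodes below
         * PHYS_INDEX are the accumulators, addressed as the magic waddrs
         * r0-r5, and everything else maps back to rf0-rf63.
         */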
        for (uint32_t i = 0; i < c->num_temps; i++) {
                int ra_reg = ra_get_node_reg(g, temp_to_node[i]);
                if (ra_reg < PHYS_INDEX) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = (V3D_QPU_WADDR_R0 +
                                                   ra_reg - ACC_INDEX);
                } else {
                        temp_registers[i].magic = false;
                        temp_registers[i].index = ra_reg - PHYS_INDEX;
                }
        }

        ralloc_free(g);

        return temp_registers;
}