GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/intel/compiler/brw_fs_copy_propagation.cpp
1
/*
2
* Copyright © 2012 Intel Corporation
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
* IN THE SOFTWARE.
22
*/
23
24
/** @file brw_fs_copy_propagation.cpp
25
*
26
* Support for global copy propagation in two passes: A local pass that does
27
* intra-block copy (and constant) propagation, and a global pass that uses
28
* dataflow analysis on the copies available at the end of each block to re-do
29
* local copy propagation with more copies available.
30
*
31
* See Muchnick's Advanced Compiler Design and Implementation, section
32
* 12.5 (p356).
33
*/
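
/* Each ACP ("available copies") entry describes one copy instruction whose
 * result may still be reusable. In dataflow terms, the analysis below
 * iterates to a fixed point on:
 *
 *    livein(b)  = (AND over parents p of (liveout(p) | undef(p)))
 *                 & (OR over parents p of liveout(p))
 *    liveout(b) = copy(b) | (livein(b) & ~kill(b))
 *
 * i.e. the classic available-copies formulation, with the undef() refinement
 * explained in fs_copy_prop_dataflow::run().
 */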

#define ACP_HASH_SIZE 64

#include "util/bitset.h"
#include "util/u_math.h"
#include "brw_fs.h"
#include "brw_fs_live_variables.h"
#include "brw_cfg.h"
#include "brw_eu.h"

using namespace brw;

namespace { /* avoid conflict with opt_copy_propagation_elements */
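/* A single available-copy entry: records that dst was written as a copy of
 * src (by a MOV or one component of a LOAD_PAYLOAD) and that the copy is
 * still valid at the current program point.
 */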
struct acp_entry : public exec_node {
   fs_reg dst;
   fs_reg src;
   unsigned global_idx;
   unsigned size_written;
   unsigned size_read;
   enum opcode opcode;
   bool saturate;
};

struct block_data {
   /**
    * Which entries in the fs_copy_prop_dataflow acp table are live at the
    * start of this block. This is the useful output of the analysis, since
    * it lets us plug those into the local copy propagation on the second
    * pass.
    */
   BITSET_WORD *livein;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are live at the end
    * of this block. This is done in initial setup from the per-block acps
    * returned by the first local copy prop pass.
    */
   BITSET_WORD *liveout;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are generated by
    * instructions in this block which reach the end of the block without
    * being killed.
    */
   BITSET_WORD *copy;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are killed over the
    * course of this block.
    */
   BITSET_WORD *kill;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are guaranteed to
    * have a fully uninitialized destination at the end of this block.
    */
   BITSET_WORD *undef;
};

class fs_copy_prop_dataflow
{
public:
   fs_copy_prop_dataflow(void *mem_ctx, cfg_t *cfg,
                         const fs_live_variables &live,
                         exec_list *out_acp[ACP_HASH_SIZE]);

   void setup_initial_values();
   void run();

   void dump_block_data() const UNUSED;

   void *mem_ctx;
   cfg_t *cfg;
   const fs_live_variables &live;

   acp_entry **acp;
   int num_acp;
   int bitset_words;

   struct block_data *bd;
};
} /* anonymous namespace */

fs_copy_prop_dataflow::fs_copy_prop_dataflow(void *mem_ctx, cfg_t *cfg,
                                             const fs_live_variables &live,
                                             exec_list *out_acp[ACP_HASH_SIZE])
   : mem_ctx(mem_ctx), cfg(cfg), live(live)
{
   bd = rzalloc_array(mem_ctx, struct block_data, cfg->num_blocks);

   num_acp = 0;
   foreach_block (block, cfg) {
      for (int i = 0; i < ACP_HASH_SIZE; i++) {
         num_acp += out_acp[block->num][i].length();
      }
   }

   acp = rzalloc_array(mem_ctx, struct acp_entry *, num_acp);

   bitset_words = BITSET_WORDS(num_acp);

   int next_acp = 0;
   foreach_block (block, cfg) {
      bd[block->num].livein = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].copy = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].kill = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].undef = rzalloc_array(bd, BITSET_WORD, bitset_words);

      for (int i = 0; i < ACP_HASH_SIZE; i++) {
         foreach_in_list(acp_entry, entry, &out_acp[block->num][i]) {
            acp[next_acp] = entry;

            entry->global_idx = next_acp;

            /* opt_copy_propagation_local populates out_acp with copies created
             * in a block which are still live at the end of the block. This
             * is exactly what we want in the COPY set.
             */
            BITSET_SET(bd[block->num].copy, next_acp);

            next_acp++;
         }
      }
   }

   assert(next_acp == num_acp);

   setup_initial_values();
   run();
}

/**
 * Set up initial values for each of the data flow sets, prior to running
 * the fixed-point algorithm.
 */
void
fs_copy_prop_dataflow::setup_initial_values()
{
   /* Initialize the COPY and KILL sets. */
   {
      /* Create a temporary table of ACP entries which we'll use for efficient
       * look-up. Unfortunately, we have to do this in two steps because we
       * have to match both sources and destinations and an ACP entry can only
       * be in one list at a time.
       *
       * We choose to make the table size between num_acp/2 and num_acp/4 to
       * try to trade off between the time it takes to initialize the table
       * via exec_list constructors or make_empty() and the cost of
       * collisions. In practice, it doesn't appear to matter too much what
       * size we make the table as long as it's roughly the same order of
       * magnitude as num_acp. We get most of the benefit of the table
       * approach even if we use a table of size ACP_HASH_SIZE, though a
       * full-sized table is 1-2% faster in practice.
       */
      unsigned acp_table_size = util_next_power_of_two(num_acp) / 4;
      acp_table_size = MAX2(acp_table_size, ACP_HASH_SIZE);
      exec_list *acp_table = new exec_list[acp_table_size];

      /* First, get all the KILLs for instructions which overwrite ACP
       * destinations.
       */
      for (int i = 0; i < num_acp; i++) {
         unsigned idx = reg_space(acp[i]->dst) & (acp_table_size - 1);
         acp_table[idx].push_tail(acp[i]);
      }

      foreach_block (block, cfg) {
         foreach_inst_in_block(fs_inst, inst, block) {
            if (inst->dst.file != VGRF)
               continue;

            unsigned idx = reg_space(inst->dst) & (acp_table_size - 1);
            foreach_in_list(acp_entry, entry, &acp_table[idx]) {
               if (regions_overlap(inst->dst, inst->size_written,
                                   entry->dst, entry->size_written))
                  BITSET_SET(bd[block->num].kill, entry->global_idx);
            }
         }
      }

      /* Clear the table for the second pass */
      for (unsigned i = 0; i < acp_table_size; i++)
         acp_table[i].make_empty();

      /* Next, get all the KILLs for instructions which overwrite ACP
       * sources.
       */
      for (int i = 0; i < num_acp; i++) {
         unsigned idx = reg_space(acp[i]->src) & (acp_table_size - 1);
         acp_table[idx].push_tail(acp[i]);
      }

      foreach_block (block, cfg) {
         foreach_inst_in_block(fs_inst, inst, block) {
            if (inst->dst.file != VGRF &&
                inst->dst.file != FIXED_GRF)
               continue;

            unsigned idx = reg_space(inst->dst) & (acp_table_size - 1);
            foreach_in_list(acp_entry, entry, &acp_table[idx]) {
               if (regions_overlap(inst->dst, inst->size_written,
                                   entry->src, entry->size_read))
                  BITSET_SET(bd[block->num].kill, entry->global_idx);
            }
         }
      }

      delete [] acp_table;
   }

   /* Populate the initial values for the livein and liveout sets. For the
    * block at the start of the program, livein = 0 and liveout = copy.
    * For the others, set liveout and livein to ~0 (the universal set).
    */
   foreach_block (block, cfg) {
      if (block->parents.is_empty()) {
         for (int i = 0; i < bitset_words; i++) {
            bd[block->num].livein[i] = 0u;
            bd[block->num].liveout[i] = bd[block->num].copy[i];
         }
      } else {
         for (int i = 0; i < bitset_words; i++) {
            bd[block->num].liveout[i] = ~0u;
            bd[block->num].livein[i] = ~0u;
         }
      }
   }

   /* Initialize the undef set. */
   foreach_block (block, cfg) {
      for (int i = 0; i < num_acp; i++) {
         BITSET_SET(bd[block->num].undef, i);
         for (unsigned off = 0; off < acp[i]->size_written; off += REG_SIZE) {
            if (BITSET_TEST(live.block_data[block->num].defout,
                            live.var_from_reg(byte_offset(acp[i]->dst, off))))
               BITSET_CLEAR(bd[block->num].undef, i);
         }
      }
   }
}
/**
 * Propagate the live sets to a fixed point: a copy is live into a block if
 * it is available at the end of (or provably undefined in) every parent
 * block, and live out of a block if it is generated there or live in and
 * not killed.
 */
void
fs_copy_prop_dataflow::run()
{
   bool progress;

   do {
      progress = false;

      foreach_block (block, cfg) {
         if (block->parents.is_empty())
            continue;

         for (int i = 0; i < bitset_words; i++) {
            const BITSET_WORD old_liveout = bd[block->num].liveout[i];
            BITSET_WORD livein_from_any_block = 0;

            /* Update livein for this block. If a copy is live out of all
             * parent blocks, it's live coming in to this block.
             */
            bd[block->num].livein[i] = ~0u;
            foreach_list_typed(bblock_link, parent_link, link, &block->parents) {
               bblock_t *parent = parent_link->block;
               /* Consider ACP entries with a known-undefined destination to
                * be available from the parent. This is valid because we're
                * free to set the undefined variable equal to the source of
                * the ACP entry without breaking the application's
                * expectations, since the variable is undefined.
                */
               bd[block->num].livein[i] &= (bd[parent->num].liveout[i] |
                                            bd[parent->num].undef[i]);
               livein_from_any_block |= bd[parent->num].liveout[i];
            }

            /* Limit to the set of ACP entries that can possibly be available
             * at the start of the block, since propagating from a variable
             * which is guaranteed to be undefined (rather than potentially
             * undefined for some dynamic control-flow paths) doesn't seem
             * particularly useful.
             */
            bd[block->num].livein[i] &= livein_from_any_block;

            /* Update liveout for this block. */
            bd[block->num].liveout[i] =
               bd[block->num].copy[i] | (bd[block->num].livein[i] &
                                         ~bd[block->num].kill[i]);

            if (old_liveout != bd[block->num].liveout[i])
               progress = true;
         }
      }
   } while (progress);
}

void
fs_copy_prop_dataflow::dump_block_data() const
{
   foreach_block (block, cfg) {
      fprintf(stderr, "Block %d [%d, %d] (parents ", block->num,
              block->start_ip, block->end_ip);
      foreach_list_typed(bblock_link, link, link, &block->parents) {
         bblock_t *parent = link->block;
         fprintf(stderr, "%d ", parent->num);
      }
      fprintf(stderr, "):\n");
      fprintf(stderr, " livein = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].livein[i]);
      fprintf(stderr, ", liveout = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].liveout[i]);
      fprintf(stderr, ",\n copy = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].copy[i]);
      fprintf(stderr, ", kill = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].kill[i]);
      fprintf(stderr, "\n");
   }
}

static bool
is_logic_op(enum opcode opcode)
{
   return (opcode == BRW_OPCODE_AND ||
           opcode == BRW_OPCODE_OR ||
           opcode == BRW_OPCODE_XOR ||
           opcode == BRW_OPCODE_NOT);
}

static bool
can_take_stride(fs_inst *inst, brw_reg_type dst_type,
                unsigned arg, unsigned stride,
                const intel_device_info *devinfo)
{
   if (stride > 4)
      return false;

   /* Bail if the channels of the source need to be aligned to the byte offset
    * of the corresponding channel of the destination, and the provided stride
    * would break this restriction.
    */
   if (has_dst_aligned_region_restriction(devinfo, inst, dst_type) &&
       !(type_sz(inst->src[arg].type) * stride ==
         type_sz(dst_type) * inst->dst.stride ||
         stride == 0))
      return false;

   /* 3-source instructions can only be Align16, which restricts what strides
    * they can take. They can only take a stride of 1 (the usual case), or 0
    * with a special "repctrl" bit. But the repctrl bit doesn't work for
    * 64-bit datatypes, so if the source type is 64-bit then only a stride of
    * 1 is allowed. From the Broadwell PRM, Volume 7 "3D Media GPGPU", page
    * 944:
    *
    *    This is applicable to 32b datatypes and 16b datatype. 64b datatypes
    *    cannot use the replicate control.
    */
   if (inst->is_3src(devinfo)) {
      if (type_sz(inst->src[arg].type) > 4)
         return stride == 1;
      else
         return stride == 1 || stride == 0;
   }

   /* From the Broadwell PRM, Volume 2a "Command Reference - Instructions",
    * page 391 ("Extended Math Function"):
    *
    *    The following restrictions apply for align1 mode: Scalar source is
    *    supported. Source and destination horizontal stride must be the
    *    same.
    *
    * From the Haswell PRM Volume 2b "Command Reference - Instructions", page
    * 134 ("Extended Math Function"):
    *
    *    Scalar source is supported. Source and destination horizontal stride
    *    must be 1.
    *
    * and similar language exists for IVB and SNB. Pre-SNB, math instructions
    * are sends, so the sources are moved to MRFs and there are no
    * restrictions.
    */
   if (inst->is_math()) {
      if (devinfo->ver == 6 || devinfo->ver == 7) {
         assert(inst->dst.stride == 1);
         return stride == 1 || stride == 0;
      } else if (devinfo->ver >= 8) {
         return stride == inst->dst.stride || stride == 0;
      }
   }

   return true;
}
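/* Derivative and quad-swizzle instructions read their operands
 * cross-channel, so they assume packed (stride 1) data; the stride check in
 * try_copy_propagate() below relies on this predicate.
 */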
static bool
instruction_requires_packed_data(fs_inst *inst)
{
   switch (inst->opcode) {
   case FS_OPCODE_DDX_FINE:
   case FS_OPCODE_DDX_COARSE:
   case FS_OPCODE_DDY_FINE:
   case FS_OPCODE_DDY_COARSE:
   case SHADER_OPCODE_QUAD_SWIZZLE:
      return true;
   default:
      return false;
   }
}

bool
fs_visitor::try_copy_propagate(fs_inst *inst, int arg, acp_entry *entry)
{
   if (inst->src[arg].file != VGRF)
      return false;

   if (entry->src.file == IMM)
      return false;
   assert(entry->src.file == VGRF || entry->src.file == UNIFORM ||
          entry->src.file == ATTR || entry->src.file == FIXED_GRF);

   /* Avoid propagating a LOAD_PAYLOAD instruction into another if there is a
    * good chance that we'll be able to eliminate the latter through register
    * coalescing. If only part of the sources of the second LOAD_PAYLOAD can
    * be simplified through copy propagation we would be making register
    * coalescing impossible, ending up with unnecessary copies in the program.
    * This is also the case for is_multi_copy_payload() copies that can only
    * be coalesced when the instruction is lowered into a sequence of MOVs.
    *
    * Worse -- In cases where the ACP entry was the result of CSE combining
    * multiple LOAD_PAYLOAD subexpressions, propagating the first LOAD_PAYLOAD
    * into the second would undo the work of CSE, leading to an infinite
    * optimization loop. Avoid this by detecting LOAD_PAYLOAD copies from CSE
    * temporaries which should match is_coalescing_payload().
    */
   if (entry->opcode == SHADER_OPCODE_LOAD_PAYLOAD &&
       (is_coalescing_payload(alloc, inst) || is_multi_copy_payload(inst)))
      return false;

   assert(entry->dst.file == VGRF);
   if (inst->src[arg].nr != entry->dst.nr)
      return false;

   /* Bail if inst is reading a range that isn't contained in the range
    * that entry is writing.
    */
   if (!region_contained_in(inst->src[arg], inst->size_read(arg),
                            entry->dst, entry->size_written))
      return false;

   /* Avoid propagating a FIXED_GRF register into an EOT instruction in order
    * for any register allocation restrictions to be applied.
    */
   if (entry->src.file == FIXED_GRF && inst->eot)
      return false;

   /* Avoid propagating odd-numbered FIXED_GRF registers into the first source
    * of a LINTERP instruction on platforms where the PLN instruction has
    * register alignment restrictions.
    */
   if (devinfo->has_pln && devinfo->ver <= 6 &&
       entry->src.file == FIXED_GRF && (entry->src.nr & 1) &&
       inst->opcode == FS_OPCODE_LINTERP && arg == 0)
      return false;

   /* We can't generally copy-propagate UD negations because we
    * can end up accessing the resulting values as signed integers
    * instead. See also resolve_ud_negate() and comment in
    * fs_generator::generate_code.
    */
   if (entry->src.type == BRW_REGISTER_TYPE_UD &&
       entry->src.negate)
      return false;

   bool has_source_modifiers = entry->src.abs || entry->src.negate;

   if (has_source_modifiers && !inst->can_do_source_mods(devinfo))
      return false;

   /* Reject cases that would violate register regioning restrictions. */
   if ((entry->src.file == UNIFORM || !entry->src.is_contiguous()) &&
       ((devinfo->ver == 6 && inst->is_math()) ||
        inst->is_send_from_grf() ||
        inst->uses_indirect_addressing())) {
      return false;
   }

   if (has_source_modifiers &&
       inst->opcode == SHADER_OPCODE_GFX4_SCRATCH_WRITE)
      return false;

   /* Some instructions implemented in the generator backend, such as
    * derivatives, assume that their operands are packed so we can't
    * generally propagate strided regions to them.
    */
   const unsigned entry_stride = (entry->src.file == FIXED_GRF ? 1 :
                                  entry->src.stride);
   if (instruction_requires_packed_data(inst) && entry_stride != 1)
      return false;

   const brw_reg_type dst_type = (has_source_modifiers &&
                                  entry->dst.type != inst->src[arg].type) ?
                                 entry->dst.type : inst->dst.type;

   /* Bail if the result of composing both strides would exceed the
    * hardware limit.
    */
   if (!can_take_stride(inst, dst_type, arg,
                        entry_stride * inst->src[arg].stride,
                        devinfo))
      return false;

   /* From the Cherry Trail/Braswell PRMs, Volume 7: 3D Media GPGPU:
    *    EU Overview
    *       Register Region Restrictions
    *          Special Requirements for Handling Double Precision Data Types:
    *
    *    "When source or destination datatype is 64b or operation is integer
    *     DWord multiply, regioning in Align1 must follow these rules:
    *
    *     1. Source and Destination horizontal stride must be aligned to the
    *        same qword.
    *     2. Regioning must ensure Src.Vstride = Src.Width * Src.Hstride.
    *     3. Source and Destination offset must be the same, except the case
    *        of scalar source."
    *
    * Most of this is already checked in can_take_stride(); we're only left
    * with checking 3.
    */
   if (has_dst_aligned_region_restriction(devinfo, inst, dst_type) &&
       entry_stride != 0 &&
       (reg_offset(inst->dst) % REG_SIZE) != (reg_offset(entry->src) % REG_SIZE))
      return false;

   /* Bail if the source FIXED_GRF region of the copy cannot be trivially
    * composed with the source region of the instruction -- E.g. because the
    * copy uses some extended stride greater than 4 not supported natively by
    * the hardware as a horizontal stride, or because instruction compression
    * could require us to use a vertical stride shorter than a GRF.
    */
   if (entry->src.file == FIXED_GRF &&
       (inst->src[arg].stride > 4 ||
        inst->dst.component_size(inst->exec_size) >
        inst->src[arg].component_size(inst->exec_size)))
      return false;

   /* Bail if the instruction type is larger than the execution type of the
    * copy, which implies that each channel is reading multiple channels of
    * the destination of the copy, and simply replacing the sources would
    * give a program with different semantics.
    */
   if (type_sz(entry->dst.type) < type_sz(inst->src[arg].type))
      return false;

   /* Bail if the result of composing both strides cannot be expressed
    * as another stride. This avoids, for example, trying to transform
    * this:
    *
    *     MOV (8) rX<1>UD rY<0;1,0>UD
    *     FOO (8) ...     rX<8;8,1>UW
    *
    * into this:
    *
    *     FOO (8) ...     rY<0;1,0>UW
    *
    * which would have different semantics.
    */
   if (entry_stride != 1 &&
       (inst->src[arg].stride *
        type_sz(inst->src[arg].type)) % type_sz(entry->src.type) != 0)
      return false;

   /* Since semantics of source modifiers are type-dependent we need to
    * ensure that the meaning of the instruction remains the same if we
    * change the type. If the sizes of the types are different the new
    * instruction will read a different amount of data than the original
    * and the semantics will always be different.
    */
   if (has_source_modifiers &&
       entry->dst.type != inst->src[arg].type &&
       (!inst->can_change_types() ||
        type_sz(entry->dst.type) != type_sz(inst->src[arg].type)))
      return false;

   if (devinfo->ver >= 8 && (entry->src.negate || entry->src.abs) &&
       is_logic_op(inst->opcode)) {
      return false;
   }
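
   /* A saturated copy can only be folded into an instruction for which the
    * saturate modifier is value-preserving. For SEL that means a min/max
    * (conditional mod GE or L) against a float immediate already inside
    * [0.0, 1.0], since then sat(sel(x, imm)) == sel(sat(x), imm).
    */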
   if (entry->saturate) {
      switch(inst->opcode) {
      case BRW_OPCODE_SEL:
         if ((inst->conditional_mod != BRW_CONDITIONAL_GE &&
              inst->conditional_mod != BRW_CONDITIONAL_L) ||
             inst->src[1].file != IMM ||
             inst->src[1].f < 0.0 ||
             inst->src[1].f > 1.0) {
            return false;
         }
         break;
      default:
         return false;
      }
   }

   /* Save the offset of inst->src[arg] relative to entry->dst for it to be
    * applied later.
    */
   const unsigned rel_offset = inst->src[arg].offset - entry->dst.offset;

   /* Fold the copy into the instruction consuming it. */
   inst->src[arg].file = entry->src.file;
   inst->src[arg].nr = entry->src.nr;
   inst->src[arg].subnr = entry->src.subnr;
   inst->src[arg].offset = entry->src.offset;

   /* Compose the strides of both regions. */
   if (entry->src.file == FIXED_GRF) {
      if (inst->src[arg].stride) {
         const unsigned orig_width = 1 << entry->src.width;
         const unsigned reg_width = REG_SIZE / (type_sz(inst->src[arg].type) *
                                                inst->src[arg].stride);
         inst->src[arg].width = cvt(MIN2(orig_width, reg_width)) - 1;
         inst->src[arg].hstride = cvt(inst->src[arg].stride);
         inst->src[arg].vstride = inst->src[arg].hstride + inst->src[arg].width;
      } else {
         inst->src[arg].vstride = inst->src[arg].hstride =
            inst->src[arg].width = 0;
      }

      inst->src[arg].stride = 1;

      /* Hopefully no Align16 around here... */
      assert(entry->src.swizzle == BRW_SWIZZLE_XYZW);
      inst->src[arg].swizzle = entry->src.swizzle;
   } else {
      inst->src[arg].stride *= entry->src.stride;
   }

   /* Compose any saturate modifiers. */
   inst->saturate = inst->saturate || entry->saturate;

   /* Compute the first component of the copy that the instruction is
    * reading, and the base byte offset within that component.
    */
   assert(entry->dst.offset % REG_SIZE == 0 && entry->dst.stride == 1);
   const unsigned component = rel_offset / type_sz(entry->dst.type);
   const unsigned suboffset = rel_offset % type_sz(entry->dst.type);
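   /* E.g., with a 4-byte copy destination type, rel_offset == 6 decomposes
    * into component == 1 and suboffset == 2 bytes within that component.
    */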

   /* Calculate the byte offset at the origin of the copy of the given
    * component and suboffset.
    */
   inst->src[arg] = byte_offset(inst->src[arg],
                                component * entry_stride *
                                type_sz(entry->src.type) + suboffset);

   if (has_source_modifiers) {
      if (entry->dst.type != inst->src[arg].type) {
         /* We are propagating source modifiers from a MOV with a different
          * type. If we got here, then we can just change the source and
          * destination types of the instruction and keep going.
          */
         assert(inst->can_change_types());
         for (int i = 0; i < inst->sources; i++) {
            inst->src[i].type = entry->dst.type;
         }
         inst->dst.type = entry->dst.type;
      }

      if (!inst->src[arg].abs) {
         inst->src[arg].abs = entry->src.abs;
         inst->src[arg].negate ^= entry->src.negate;
      }
   }

   return true;
}


bool
fs_visitor::try_constant_propagate(fs_inst *inst, acp_entry *entry)
{
   bool progress = false;

   if (entry->src.file != IMM)
      return false;
   if (type_sz(entry->src.type) > 4)
      return false;
   if (entry->saturate)
      return false;

   for (int i = inst->sources - 1; i >= 0; i--) {
      if (inst->src[i].file != VGRF)
         continue;

      assert(entry->dst.file == VGRF);
      if (inst->src[i].nr != entry->dst.nr)
         continue;

      /* Bail if inst is reading a range that isn't contained in the range
       * that entry is writing.
       */
      if (!region_contained_in(inst->src[i], inst->size_read(i),
                               entry->dst, entry->size_written))
         continue;

      /* If the type sizes don't match each channel of the instruction is
       * either extracting a portion of the constant (which could be handled
       * with some effort but the code below doesn't) or reading multiple
       * channels of the source at once.
       */
      if (type_sz(inst->src[i].type) != type_sz(entry->dst.type))
         continue;

      fs_reg val = entry->src;
      val.type = inst->src[i].type;

      if (inst->src[i].abs) {
         if ((devinfo->ver >= 8 && is_logic_op(inst->opcode)) ||
             !brw_abs_immediate(val.type, &val.as_brw_reg())) {
            continue;
         }
      }

      if (inst->src[i].negate) {
         if ((devinfo->ver >= 8 && is_logic_op(inst->opcode)) ||
             !brw_negate_immediate(val.type, &val.as_brw_reg())) {
            continue;
         }
      }

      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
      case SHADER_OPCODE_LOAD_PAYLOAD:
      case FS_OPCODE_PACK:
         inst->src[i] = val;
         progress = true;
         break;

      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
         /* FINISHME: Promote non-float constants and remove this. */
         if (devinfo->ver < 8)
            break;
         FALLTHROUGH;
      case SHADER_OPCODE_POW:
         /* Allow constant propagation into src1 (except on Gen 6, which
          * doesn't support scalar source math), and let constant combining
          * promote the constant on Gen < 8.
          */
         if (devinfo->ver == 6)
            break;
         FALLTHROUGH;
      case BRW_OPCODE_BFI1:
      case BRW_OPCODE_ASR:
      case BRW_OPCODE_SHL:
      case BRW_OPCODE_SHR:
      case BRW_OPCODE_SUBB:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         }
         break;

      case BRW_OPCODE_MACH:
      case BRW_OPCODE_MUL:
      case SHADER_OPCODE_MULH:
      case BRW_OPCODE_ADD:
      case BRW_OPCODE_OR:
      case BRW_OPCODE_AND:
      case BRW_OPCODE_XOR:
      case BRW_OPCODE_ADDC:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM) {
            /* Fit this constant in by commuting the operands.
             * Exception: we can't do this for 32-bit integer MUL/MACH
             * because it's asymmetric.
             *
             * The BSpec says for Broadwell that
             *
             *    "When multiplying DW x DW, the dst cannot be accumulator."
             *
             * Integer MUL with a non-accumulator destination will be lowered
             * by lower_integer_multiplication(), so don't restrict it.
             */
            if (((inst->opcode == BRW_OPCODE_MUL &&
                  inst->dst.is_accumulator()) ||
                 inst->opcode == BRW_OPCODE_MACH) &&
                (inst->src[1].type == BRW_REGISTER_TYPE_D ||
                 inst->src[1].type == BRW_REGISTER_TYPE_UD))
               break;
            inst->src[0] = inst->src[1];
            inst->src[1] = val;
            progress = true;
         }
         break;

      case BRW_OPCODE_CMP:
      case BRW_OPCODE_IF:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM) {
            enum brw_conditional_mod new_cmod;

            new_cmod = brw_swap_cmod(inst->conditional_mod);
            if (new_cmod != BRW_CONDITIONAL_NONE) {
               /* Fit this constant in by swapping the operands and
                * flipping the test
                */
               inst->src[0] = inst->src[1];
               inst->src[1] = val;
               inst->conditional_mod = new_cmod;
               progress = true;
            }
         }
         break;

      case BRW_OPCODE_SEL:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM &&
                    (inst->conditional_mod == BRW_CONDITIONAL_NONE ||
                     /* Only GE and L are commutative. */
                     inst->conditional_mod == BRW_CONDITIONAL_GE ||
                     inst->conditional_mod == BRW_CONDITIONAL_L)) {
            inst->src[0] = inst->src[1];
            inst->src[1] = val;

            /* If this was predicated, flipping operands means
             * we also need to flip the predicate.
             */
            if (inst->conditional_mod == BRW_CONDITIONAL_NONE) {
               inst->predicate_inverse =
                  !inst->predicate_inverse;
            }
            progress = true;
         }
         break;

      case FS_OPCODE_FB_WRITE_LOGICAL:
         /* The stencil and omask sources of FS_OPCODE_FB_WRITE_LOGICAL are
          * bit-cast using a strided region so they cannot be immediates.
          */
         if (i != FB_WRITE_LOGICAL_SRC_SRC_STENCIL &&
             i != FB_WRITE_LOGICAL_SRC_OMASK) {
            inst->src[i] = val;
            progress = true;
         }
         break;

      case SHADER_OPCODE_TEX_LOGICAL:
      case SHADER_OPCODE_TXD_LOGICAL:
      case SHADER_OPCODE_TXF_LOGICAL:
      case SHADER_OPCODE_TXL_LOGICAL:
      case SHADER_OPCODE_TXS_LOGICAL:
      case FS_OPCODE_TXB_LOGICAL:
      case SHADER_OPCODE_TXF_CMS_LOGICAL:
      case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
      case SHADER_OPCODE_TXF_UMS_LOGICAL:
      case SHADER_OPCODE_TXF_MCS_LOGICAL:
      case SHADER_OPCODE_LOD_LOGICAL:
      case SHADER_OPCODE_TG4_LOGICAL:
      case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
      case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
      case SHADER_OPCODE_IMAGE_SIZE_LOGICAL:
      case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
      case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL:
      case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
      case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
      case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
      case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
      case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
      case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
      case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
         inst->src[i] = val;
         progress = true;
         break;

      case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      case SHADER_OPCODE_BROADCAST:
         inst->src[i] = val;
         progress = true;
         break;

      case BRW_OPCODE_MAD:
      case BRW_OPCODE_LRP:
         inst->src[i] = val;
         progress = true;
         break;

      default:
         break;
      }
   }

   return progress;
}

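/* An instruction can seed an ACP entry when it is a plain MOV into a VGRF
 * whose source and destination do not overlap (or when it is an identity
 * FIXED_GRF payload), so that later reads of the destination can always be
 * replaced by the source.
 */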
static bool
can_propagate_from(fs_inst *inst)
{
   return (inst->opcode == BRW_OPCODE_MOV &&
           inst->dst.file == VGRF &&
           ((inst->src[0].file == VGRF &&
             !regions_overlap(inst->dst, inst->size_written,
                              inst->src[0], inst->size_read(0))) ||
            inst->src[0].file == ATTR ||
            inst->src[0].file == UNIFORM ||
            inst->src[0].file == IMM ||
            (inst->src[0].file == FIXED_GRF &&
             inst->src[0].is_contiguous())) &&
           inst->src[0].type == inst->dst.type &&
           !inst->is_partial_write()) ||
          is_identity_payload(FIXED_GRF, inst);
}

/* Walks a basic block and does copy propagation on it using the acp
 * list.
 */
bool
fs_visitor::opt_copy_propagation_local(void *copy_prop_ctx, bblock_t *block,
                                       exec_list *acp)
{
   bool progress = false;

   foreach_inst_in_block(fs_inst, inst, block) {
      /* Try propagating into this instruction. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file != VGRF)
            continue;

         foreach_in_list(acp_entry, entry, &acp[inst->src[i].nr % ACP_HASH_SIZE]) {
            if (try_constant_propagate(inst, entry))
               progress = true;
            else if (try_copy_propagate(inst, i, entry))
               progress = true;
         }
      }

      /* kill the destination from the ACP */
      if (inst->dst.file == VGRF || inst->dst.file == FIXED_GRF) {
         foreach_in_list_safe(acp_entry, entry, &acp[inst->dst.nr % ACP_HASH_SIZE]) {
            if (regions_overlap(entry->dst, entry->size_written,
                                inst->dst, inst->size_written))
               entry->remove();
         }

         /* Oops, we only have the chaining hash based on the destination, not
          * the source, so walk across the entire table.
          */
         for (int i = 0; i < ACP_HASH_SIZE; i++) {
            foreach_in_list_safe(acp_entry, entry, &acp[i]) {
               /* Make sure we kill the entry if this instruction overwrites
                * _any_ of the registers that it reads.
                */
               if (regions_overlap(entry->src, entry->size_read,
                                   inst->dst, inst->size_written))
                  entry->remove();
            }
         }
      }

      /* If this instruction's source could potentially be folded into the
       * operand of another instruction, add it to the ACP.
       */
      if (can_propagate_from(inst)) {
         acp_entry *entry = rzalloc(copy_prop_ctx, acp_entry);
         entry->dst = inst->dst;
         entry->src = inst->src[0];
         entry->size_written = inst->size_written;
         for (unsigned i = 0; i < inst->sources; i++)
            entry->size_read += inst->size_read(i);
         entry->opcode = inst->opcode;
         entry->saturate = inst->saturate;
         acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
      } else if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD &&
                 inst->dst.file == VGRF) {
         int offset = 0;
         for (int i = 0; i < inst->sources; i++) {
            int effective_width = i < inst->header_size ? 8 : inst->exec_size;
            assert(effective_width * type_sz(inst->src[i].type) % REG_SIZE == 0);
            const unsigned size_written = effective_width *
                                          type_sz(inst->src[i].type);
            if (inst->src[i].file == VGRF ||
                (inst->src[i].file == FIXED_GRF &&
                 inst->src[i].is_contiguous())) {
               acp_entry *entry = rzalloc(copy_prop_ctx, acp_entry);
               entry->dst = byte_offset(inst->dst, offset);
               entry->src = inst->src[i];
               entry->size_written = size_written;
               entry->size_read = inst->size_read(i);
               entry->opcode = inst->opcode;
               if (!entry->dst.equals(inst->src[i])) {
                  acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
               } else {
                  ralloc_free(entry);
               }
            }
            offset += size_written;
         }
      }
   }

   return progress;
}

bool
fs_visitor::opt_copy_propagation()
{
   bool progress = false;
   void *copy_prop_ctx = ralloc_context(NULL);
   exec_list *out_acp[cfg->num_blocks];

   for (int i = 0; i < cfg->num_blocks; i++)
      out_acp[i] = new exec_list [ACP_HASH_SIZE];

   const fs_live_variables &live = live_analysis.require();

   /* First, walk through each block doing local copy propagation and getting
    * the set of copies available at the end of the block.
    */
   foreach_block (block, cfg) {
      progress = opt_copy_propagation_local(copy_prop_ctx, block,
                                            out_acp[block->num]) || progress;

      /* If the destination of an ACP entry exists only within this block,
       * then there's no need to keep it for dataflow analysis. We can delete
       * it from the out_acp table and avoid growing the bitsets any bigger
       * than we absolutely have to.
       *
       * Because nothing in opt_copy_propagation_local touches the block
       * start/end IPs and opt_copy_propagation_local is incapable of
       * extending the live range of an ACP destination beyond the block,
       * it's safe to use the liveness information in this way.
       */
      for (unsigned a = 0; a < ACP_HASH_SIZE; a++) {
         foreach_in_list_safe(acp_entry, entry, &out_acp[block->num][a]) {
            assert(entry->dst.file == VGRF);
            if (block->start_ip <= live.vgrf_start[entry->dst.nr] &&
                live.vgrf_end[entry->dst.nr] <= block->end_ip)
               entry->remove();
         }
      }
   }

   /* Do dataflow analysis for those available copies. */
   fs_copy_prop_dataflow dataflow(copy_prop_ctx, cfg, live, out_acp);

   /* Next, re-run local copy propagation, this time with the set of copies
    * provided by the dataflow analysis available at the start of a block.
    */
   foreach_block (block, cfg) {
      exec_list in_acp[ACP_HASH_SIZE];

      for (int i = 0; i < dataflow.num_acp; i++) {
         if (BITSET_TEST(dataflow.bd[block->num].livein, i)) {
            struct acp_entry *entry = dataflow.acp[i];
            in_acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
         }
      }

      progress = opt_copy_propagation_local(copy_prop_ctx, block, in_acp) ||
                 progress;
   }

   for (int i = 0; i < cfg->num_blocks; i++)
      delete [] out_acp[i];
   ralloc_free(copy_prop_ctx);

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_DATA_FLOW |
                          DEPENDENCY_INSTRUCTION_DETAIL);

   return progress;
}