GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/amd/compiler/aco_insert_NOPs.cpp
/*
 * Copyright © 2019 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_builder.h"
#include "aco_ir.h"

#include <algorithm>
#include <bitset>
#include <stack>
#include <vector>

namespace aco {
namespace {

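/* Per-block hazard state for GFX6-9. Each counter below holds the number of
 * wait states that must still elapse before the corresponding hazard is safe;
 * join() merges predecessor states by taking the worst case (max of counters,
 * union of sets) so hazards are tracked correctly across control flow. */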
struct NOP_ctx_gfx6 {
   void join(const NOP_ctx_gfx6& other)
   {
      set_vskip_mode_then_vector =
         MAX2(set_vskip_mode_then_vector, other.set_vskip_mode_then_vector);
      valu_wr_vcc_then_vccz = MAX2(valu_wr_vcc_then_vccz, other.valu_wr_vcc_then_vccz);
      valu_wr_exec_then_execz = MAX2(valu_wr_exec_then_execz, other.valu_wr_exec_then_execz);
      valu_wr_vcc_then_div_fmas = MAX2(valu_wr_vcc_then_div_fmas, other.valu_wr_vcc_then_div_fmas);
      salu_wr_m0_then_gds_msg_ttrace =
         MAX2(salu_wr_m0_then_gds_msg_ttrace, other.salu_wr_m0_then_gds_msg_ttrace);
      valu_wr_exec_then_dpp = MAX2(valu_wr_exec_then_dpp, other.valu_wr_exec_then_dpp);
      salu_wr_m0_then_lds = MAX2(salu_wr_m0_then_lds, other.salu_wr_m0_then_lds);
      salu_wr_m0_then_moverel = MAX2(salu_wr_m0_then_moverel, other.salu_wr_m0_then_moverel);
      setreg_then_getsetreg = MAX2(setreg_then_getsetreg, other.setreg_then_getsetreg);
      vmem_store_then_wr_data |= other.vmem_store_then_wr_data;
      smem_clause |= other.smem_clause;
      smem_write |= other.smem_write;
      for (unsigned i = 0; i < BITSET_WORDS(128); i++) {
         smem_clause_read_write[i] |= other.smem_clause_read_write[i];
         smem_clause_write[i] |= other.smem_clause_write[i];
      }
   }

   bool operator==(const NOP_ctx_gfx6& other)
   {
      return set_vskip_mode_then_vector == other.set_vskip_mode_then_vector &&
             valu_wr_vcc_then_vccz == other.valu_wr_vcc_then_vccz &&
             valu_wr_exec_then_execz == other.valu_wr_exec_then_execz &&
             valu_wr_vcc_then_div_fmas == other.valu_wr_vcc_then_div_fmas &&
             vmem_store_then_wr_data == other.vmem_store_then_wr_data &&
             salu_wr_m0_then_gds_msg_ttrace == other.salu_wr_m0_then_gds_msg_ttrace &&
             valu_wr_exec_then_dpp == other.valu_wr_exec_then_dpp &&
             salu_wr_m0_then_lds == other.salu_wr_m0_then_lds &&
             salu_wr_m0_then_moverel == other.salu_wr_m0_then_moverel &&
             setreg_then_getsetreg == other.setreg_then_getsetreg &&
             smem_clause == other.smem_clause && smem_write == other.smem_write &&
             BITSET_EQUAL(smem_clause_read_write, other.smem_clause_read_write) &&
             BITSET_EQUAL(smem_clause_write, other.smem_clause_write);
   }

   void add_wait_states(unsigned amount)
   {
      if ((set_vskip_mode_then_vector -= amount) < 0)
         set_vskip_mode_then_vector = 0;

      if ((valu_wr_vcc_then_vccz -= amount) < 0)
         valu_wr_vcc_then_vccz = 0;

      if ((valu_wr_exec_then_execz -= amount) < 0)
         valu_wr_exec_then_execz = 0;

      if ((valu_wr_vcc_then_div_fmas -= amount) < 0)
         valu_wr_vcc_then_div_fmas = 0;

      if ((salu_wr_m0_then_gds_msg_ttrace -= amount) < 0)
         salu_wr_m0_then_gds_msg_ttrace = 0;

      if ((valu_wr_exec_then_dpp -= amount) < 0)
         valu_wr_exec_then_dpp = 0;

      if ((salu_wr_m0_then_lds -= amount) < 0)
         salu_wr_m0_then_lds = 0;

      if ((salu_wr_m0_then_moverel -= amount) < 0)
         salu_wr_m0_then_moverel = 0;

      if ((setreg_then_getsetreg -= amount) < 0)
         setreg_then_getsetreg = 0;

      vmem_store_then_wr_data.reset();
   }

   /* setting MODE.vskip and then any vector op requires 2 wait states */
   int8_t set_vskip_mode_then_vector = 0;

   /* VALU writing VCC/EXEC and then a VALU reading VCCZ/EXECZ requires 5 wait states */
   int8_t valu_wr_vcc_then_vccz = 0;
   int8_t valu_wr_exec_then_execz = 0;

   /* VALU writing VCC followed by v_div_fmas requires 4 wait states */
   int8_t valu_wr_vcc_then_div_fmas = 0;

   /* SALU writing M0 followed by GDS, s_sendmsg or s_ttrace_data requires 1 wait state */
   int8_t salu_wr_m0_then_gds_msg_ttrace = 0;

   /* VALU writing EXEC followed by DPP requires 5 wait states */
   int8_t valu_wr_exec_then_dpp = 0;

   /* SALU writing M0 followed by some LDS instructions requires 1 wait state on GFX9 */
   int8_t salu_wr_m0_then_lds = 0;

   /* SALU writing M0 followed by s_moverel requires 1 wait state on GFX9 */
   int8_t salu_wr_m0_then_moverel = 0;

   /* s_setreg followed by an s_getreg/s_setreg of the same register needs 2 wait states;
    * currently we don't look at the actual register */
   int8_t setreg_then_getsetreg = 0;

   /* some memory instructions writing >64 bits, followed by an instruction
    * writing the VGPRs holding the write data, require 1 wait state */
   std::bitset<256> vmem_store_then_wr_data;

   /* we break up SMEM clauses that contain stores or overwrite an
    * operand/definition of another instruction in the clause */
   bool smem_clause = false;
   bool smem_write = false;
   BITSET_DECLARE(smem_clause_read_write, 128) = {0};
   BITSET_DECLARE(smem_clause_write, 128) = {0};
};

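/* Per-block hazard state for GFX10 (RDNA1). Unlike the GFX6 context, these
 * are mostly sticky flags: a flag is set when the first half of a hazardous
 * pattern is seen and cleared once a mitigating instruction is seen or
 * inserted. */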
struct NOP_ctx_gfx10 {
   bool has_VOPC = false;
   bool has_nonVALU_exec_read = false;
   bool has_VMEM = false;
   bool has_branch_after_VMEM = false;
   bool has_DS = false;
   bool has_branch_after_DS = false;
   bool has_NSA_MIMG = false;
   bool has_writelane = false;
   std::bitset<128> sgprs_read_by_VMEM;
   std::bitset<128> sgprs_read_by_SMEM;

   void join(const NOP_ctx_gfx10& other)
   {
      has_VOPC |= other.has_VOPC;
      has_nonVALU_exec_read |= other.has_nonVALU_exec_read;
      has_VMEM |= other.has_VMEM;
      has_branch_after_VMEM |= other.has_branch_after_VMEM;
      has_DS |= other.has_DS;
      has_branch_after_DS |= other.has_branch_after_DS;
      has_NSA_MIMG |= other.has_NSA_MIMG;
      has_writelane |= other.has_writelane;
      sgprs_read_by_VMEM |= other.sgprs_read_by_VMEM;
      sgprs_read_by_SMEM |= other.sgprs_read_by_SMEM;
   }

   bool operator==(const NOP_ctx_gfx10& other)
   {
      return has_VOPC == other.has_VOPC && has_nonVALU_exec_read == other.has_nonVALU_exec_read &&
             has_VMEM == other.has_VMEM && has_branch_after_VMEM == other.has_branch_after_VMEM &&
             has_DS == other.has_DS && has_branch_after_DS == other.has_branch_after_DS &&
             has_NSA_MIMG == other.has_NSA_MIMG && has_writelane == other.has_writelane &&
             sgprs_read_by_VMEM == other.sgprs_read_by_VMEM &&
             sgprs_read_by_SMEM == other.sgprs_read_by_SMEM;
   }
};

int
get_wait_states(aco_ptr<Instruction>& instr)
{
   if (instr->opcode == aco_opcode::s_nop)
      return instr->sopp().imm + 1;
   else if (instr->opcode == aco_opcode::p_constaddr)
      return 3; /* lowered to 3 instructions in the assembler */
   else
      return 1;
}

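/* Returns true iff the register ranges [a_reg, a_reg + a_size) and
 * [b_reg, b_reg + b_size) overlap. For example, a 2-dword register at s[0]
 * and a 4-dword register at s[1] intersect, since both cover s[1]. */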
bool
regs_intersect(PhysReg a_reg, unsigned a_size, PhysReg b_reg, unsigned b_size)
{
   return a_reg > b_reg ? (a_reg - b_reg < b_size) : (b_reg - a_reg < a_size);
}

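/* Scans backwards through the current block (and, if needed, its linear
 * predecessors) for an instruction of the templated kind (VALU/VINTRP/SALU)
 * that wrote one of the registers in `mask` starting at `reg`. Returns the
 * number of wait states still missing for the hazard, or 0 if enough wait
 * states have already passed or every masked register was overwritten by a
 * non-hazardous instruction in the meantime. */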
template <bool Valu, bool Vintrp, bool Salu>
int
handle_raw_hazard_internal(Program* program, Block* block, int nops_needed, PhysReg reg,
                           uint32_t mask)
{
   unsigned mask_size = util_last_bit(mask);
   for (int pred_idx = block->instructions.size() - 1; pred_idx >= 0; pred_idx--) {
      aco_ptr<Instruction>& pred = block->instructions[pred_idx];

      uint32_t writemask = 0;
      for (Definition& def : pred->definitions) {
         if (regs_intersect(reg, mask_size, def.physReg(), def.size())) {
            unsigned start = def.physReg() > reg ? def.physReg() - reg : 0;
            unsigned end = MIN2(mask_size, start + def.size());
            writemask |= u_bit_consecutive(start, end - start);
         }
      }

      bool is_hazard = writemask != 0 && ((pred->isVALU() && Valu) ||
                                          (pred->isVINTRP() && Vintrp) || (pred->isSALU() && Salu));
      if (is_hazard)
         return nops_needed;

      mask &= ~writemask;
      nops_needed -= get_wait_states(pred);

      if (nops_needed <= 0 || mask == 0)
         return 0;
   }

   int res = 0;

   /* Loops require branch instructions, which count towards the wait
    * states. So even with loops this should finish unless nops_needed is some
    * huge value. */
   for (unsigned lin_pred : block->linear_preds) {
      res = std::max(res, handle_raw_hazard_internal<Valu, Vintrp, Salu>(
                             program, &program->blocks[lin_pred], nops_needed, reg, mask));
   }
   return res;
}

template <bool Valu, bool Vintrp, bool Salu>
void
handle_raw_hazard(Program* program, Block* cur_block, int* NOPs, int min_states, Operand op)
{
   if (*NOPs >= min_states)
      return;
   int res = handle_raw_hazard_internal<Valu, Vintrp, Salu>(
      program, cur_block, min_states, op.physReg(), u_bit_consecutive(0, op.size()));
   *NOPs = MAX2(*NOPs, res);
}

static auto handle_valu_then_read_hazard = handle_raw_hazard<true, true, false>;
static auto handle_vintrp_then_read_hazard = handle_raw_hazard<false, true, false>;
static auto handle_valu_salu_then_read_hazard = handle_raw_hazard<true, true, true>;

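/* Set/test a range of bits that may straddle a BITSET_WORD boundary. The
 * BITSET_*_RANGE macros are assumed to only operate within a single word,
 * which is why ranges crossing a word boundary are split recursively here. */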
void
set_bitset_range(BITSET_WORD* words, unsigned start, unsigned size)
{
   unsigned end = start + size - 1;
   unsigned start_mod = start % BITSET_WORDBITS;
   if (start_mod + size <= BITSET_WORDBITS) {
      BITSET_SET_RANGE(words, start, end);
   } else {
      unsigned first_size = BITSET_WORDBITS - start_mod;
      set_bitset_range(words, start, BITSET_WORDBITS - start_mod);
      set_bitset_range(words, start + first_size, size - first_size);
   }
}

bool
test_bitset_range(BITSET_WORD* words, unsigned start, unsigned size)
{
   unsigned end = start + size - 1;
   unsigned start_mod = start % BITSET_WORDBITS;
   if (start_mod + size <= BITSET_WORDBITS) {
      return BITSET_TEST_RANGE(words, start, end);
   } else {
      unsigned first_size = BITSET_WORDBITS - start_mod;
      return test_bitset_range(words, start, BITSET_WORDBITS - start_mod) ||
             test_bitset_range(words, start + first_size, size - first_size);
   }
}

/* A SMEM clause is any group of consecutive SMEM instructions. The
 * instructions in this group may return out of order and/or may be replayed.
 *
 * To fix this potential hazard correctly, we have to make sure that when a
 * clause has more than one instruction, no instruction in the clause writes
 * to a register that is read by another instruction in the clause (including
 * itself). In this case, we have to break the SMEM clause by inserting
 * non-SMEM instructions.
 *
 * SMEM clauses are only present on GFX8+, and only matter when XNACK is set.
 */
void
handle_smem_clause_hazards(Program* program, NOP_ctx_gfx6& ctx, aco_ptr<Instruction>& instr,
                           int* NOPs)
{
   /* break off from previous SMEM clause if needed */
   if (!*NOPs && (ctx.smem_clause || ctx.smem_write)) {
      /* Don't allow clauses with store instructions since the clause's
       * instructions may use the same address. */
      if (ctx.smem_write || instr->definitions.empty() ||
          instr_info.is_atomic[(unsigned)instr->opcode]) {
         *NOPs = 1;
      } else if (program->dev.xnack_enabled) {
         for (Operand op : instr->operands) {
            if (!op.isConstant() &&
                test_bitset_range(ctx.smem_clause_write, op.physReg(), op.size())) {
               *NOPs = 1;
               break;
            }
         }

         Definition def = instr->definitions[0];
         if (!*NOPs && test_bitset_range(ctx.smem_clause_read_write, def.physReg(), def.size()))
            *NOPs = 1;
      }
   }
}

/* TODO: we don't handle accessing VCC using the actual SGPR instead of using the alias */
void
handle_instruction_gfx6(Program* program, Block* cur_block, NOP_ctx_gfx6& ctx,
                        aco_ptr<Instruction>& instr,
                        std::vector<aco_ptr<Instruction>>& new_instructions)
{
   /* check hazards */
   int NOPs = 0;

   if (instr->isSMEM()) {
      if (program->chip_class == GFX6) {
         /* A read of an SGPR by an SMRD instruction requires 4 wait states
          * when the SGPR was written by a VALU instruction. According to LLVM,
          * there is also an undocumented hardware behavior when the buffer
          * descriptor is written by a SALU instruction */
         for (unsigned i = 0; i < instr->operands.size(); i++) {
            Operand op = instr->operands[i];
            if (op.isConstant())
               continue;

            bool is_buffer_desc = i == 0 && op.size() > 2;
            if (is_buffer_desc)
               handle_valu_salu_then_read_hazard(program, cur_block, &NOPs, 4, op);
            else
               handle_valu_then_read_hazard(program, cur_block, &NOPs, 4, op);
         }
      }

      handle_smem_clause_hazards(program, ctx, instr, &NOPs);
   } else if (instr->isSALU()) {
      if (instr->opcode == aco_opcode::s_setreg_b32 ||
          instr->opcode == aco_opcode::s_setreg_imm32_b32 ||
          instr->opcode == aco_opcode::s_getreg_b32) {
         NOPs = MAX2(NOPs, ctx.setreg_then_getsetreg);
      }

      if (program->chip_class == GFX9) {
         if (instr->opcode == aco_opcode::s_movrels_b32 ||
             instr->opcode == aco_opcode::s_movrels_b64 ||
             instr->opcode == aco_opcode::s_movreld_b32 ||
             instr->opcode == aco_opcode::s_movreld_b64) {
            NOPs = MAX2(NOPs, ctx.salu_wr_m0_then_moverel);
         }
      }

      if (instr->opcode == aco_opcode::s_sendmsg || instr->opcode == aco_opcode::s_ttracedata)
         NOPs = MAX2(NOPs, ctx.salu_wr_m0_then_gds_msg_ttrace);
   } else if (instr->isDS() && instr->ds().gds) {
      NOPs = MAX2(NOPs, ctx.salu_wr_m0_then_gds_msg_ttrace);
   } else if (instr->isVALU() || instr->isVINTRP()) {
      for (Operand op : instr->operands) {
         if (op.physReg() == vccz)
            NOPs = MAX2(NOPs, ctx.valu_wr_vcc_then_vccz);
         if (op.physReg() == execz)
            NOPs = MAX2(NOPs, ctx.valu_wr_exec_then_execz);
      }

      if (instr->isDPP()) {
         NOPs = MAX2(NOPs, ctx.valu_wr_exec_then_dpp);
         handle_valu_then_read_hazard(program, cur_block, &NOPs, 2, instr->operands[0]);
      }

      for (Definition def : instr->definitions) {
         if (def.regClass().type() != RegType::sgpr) {
            for (unsigned i = 0; i < def.size(); i++)
               NOPs = MAX2(NOPs, ctx.vmem_store_then_wr_data[(def.physReg() & 0xff) + i]);
         }
      }

      if ((instr->opcode == aco_opcode::v_readlane_b32 ||
           instr->opcode == aco_opcode::v_readlane_b32_e64 ||
           instr->opcode == aco_opcode::v_writelane_b32 ||
           instr->opcode == aco_opcode::v_writelane_b32_e64) &&
          !instr->operands[1].isConstant()) {
         handle_valu_then_read_hazard(program, cur_block, &NOPs, 4, instr->operands[1]);
      }

      /* It's required to insert 1 wait state if the dst VGPR of any v_interp_*
       * is followed by a read with v_readfirstlane or v_readlane to fix GPU
       * hangs on GFX6. Note that v_writelane_* is apparently not affected.
       * This hazard isn't documented anywhere but AMD confirmed that hazard.
       */
      if (program->chip_class == GFX6 &&
          (instr->opcode == aco_opcode::v_readlane_b32 || /* GFX6 doesn't have v_readlane_b32_e64 */
           instr->opcode == aco_opcode::v_readfirstlane_b32)) {
         handle_vintrp_then_read_hazard(program, cur_block, &NOPs, 1, instr->operands[0]);
      }

      if (instr->opcode == aco_opcode::v_div_fmas_f32 ||
          instr->opcode == aco_opcode::v_div_fmas_f64)
         NOPs = MAX2(NOPs, ctx.valu_wr_vcc_then_div_fmas);
   } else if (instr->isVMEM() || instr->isFlatLike()) {
      /* If the VALU writes the SGPR that is used by a VMEM, the user must add five wait states. */
      for (Operand op : instr->operands) {
         if (!op.isConstant() && !op.isUndefined() && op.regClass().type() == RegType::sgpr)
            handle_valu_then_read_hazard(program, cur_block, &NOPs, 5, op);
      }
   }

   if (!instr->isSALU() && instr->format != Format::SMEM)
      NOPs = MAX2(NOPs, ctx.set_vskip_mode_then_vector);

   if (program->chip_class == GFX9) {
      bool lds_scratch_global = (instr->isScratch() || instr->isGlobal()) && instr->flatlike().lds;
      if (instr->isVINTRP() || lds_scratch_global ||
          instr->opcode == aco_opcode::ds_read_addtid_b32 ||
          instr->opcode == aco_opcode::ds_write_addtid_b32 ||
          instr->opcode == aco_opcode::buffer_store_lds_dword) {
         NOPs = MAX2(NOPs, ctx.salu_wr_m0_then_lds);
      }
   }

   ctx.add_wait_states(NOPs + get_wait_states(instr));

   // TODO: try to schedule the NOP-causing instruction up to reduce the number of stall cycles
   if (NOPs) {
      /* create NOP */
      aco_ptr<SOPP_instruction> nop{
         create_instruction<SOPP_instruction>(aco_opcode::s_nop, Format::SOPP, 0, 0)};
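      /* s_nop with imm N provides N + 1 wait states (get_wait_states above
       * decodes it the same way), hence the -1 here */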
      nop->imm = NOPs - 1;
      nop->block = -1;
      new_instructions.emplace_back(std::move(nop));
   }

   /* update information to check for later hazards */
   if ((ctx.smem_clause || ctx.smem_write) && (NOPs || instr->format != Format::SMEM)) {
      ctx.smem_clause = false;
      ctx.smem_write = false;

      if (program->dev.xnack_enabled) {
         BITSET_ZERO(ctx.smem_clause_read_write);
         BITSET_ZERO(ctx.smem_clause_write);
      }
   }

   if (instr->isSMEM()) {
      if (instr->definitions.empty() || instr_info.is_atomic[(unsigned)instr->opcode]) {
         ctx.smem_write = true;
      } else {
         ctx.smem_clause = true;

         if (program->dev.xnack_enabled) {
            for (Operand op : instr->operands) {
               if (!op.isConstant()) {
                  set_bitset_range(ctx.smem_clause_read_write, op.physReg(), op.size());
               }
            }

            Definition def = instr->definitions[0];
            set_bitset_range(ctx.smem_clause_read_write, def.physReg(), def.size());
            set_bitset_range(ctx.smem_clause_write, def.physReg(), def.size());
         }
      }
   } else if (instr->isVALU()) {
      for (Definition def : instr->definitions) {
         if (def.regClass().type() == RegType::sgpr) {
            if (def.physReg() == vcc || def.physReg() == vcc_hi) {
               ctx.valu_wr_vcc_then_vccz = 5;
               ctx.valu_wr_vcc_then_div_fmas = 4;
            }
            if (def.physReg() == exec || def.physReg() == exec_hi) {
               ctx.valu_wr_exec_then_execz = 5;
               ctx.valu_wr_exec_then_dpp = 5;
            }
         }
      }
   } else if (instr->isSALU()) {
      if (!instr->definitions.empty()) {
         /* all other definitions should be SCC */
         Definition def = instr->definitions[0];
         if (def.physReg() == m0) {
            ctx.salu_wr_m0_then_gds_msg_ttrace = 1;
            ctx.salu_wr_m0_then_lds = 1;
            ctx.salu_wr_m0_then_moverel = 1;
         }
      } else if (instr->opcode == aco_opcode::s_setreg_b32 ||
                 instr->opcode == aco_opcode::s_setreg_imm32_b32) {
         SOPK_instruction& sopk = instr->sopk();
         unsigned offset = (sopk.imm >> 6) & 0x1f;
         unsigned size = ((sopk.imm >> 11) & 0x1f) + 1;
         unsigned reg = sopk.imm & 0x3f;
         ctx.setreg_then_getsetreg = 2;

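         /* hwreg 1 is MODE; the write touches MODE.vskip (bit 28) iff the
          * written bitfield [offset, offset + size) covers bit 28 */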
         if (reg == 1 && offset >= 28 && size > (28 - offset))
            ctx.set_vskip_mode_then_vector = 2;
      }
   } else if (instr->isVMEM() || instr->isFlatLike()) {
      /* >64-bit MUBUF/MTBUF store with a constant in SOFFSET */
      bool consider_buf = (instr->isMUBUF() || instr->isMTBUF()) && instr->operands.size() == 4 &&
                          instr->operands[3].size() > 2 && instr->operands[2].physReg() >= 128;
      /* MIMG store with a 128-bit T# with more than two bits set in dmask (making it a >64-bit
       * store) */
      bool consider_mimg = instr->isMIMG() &&
                           instr->operands[1].regClass().type() == RegType::vgpr &&
                           instr->operands[1].size() > 2 && instr->operands[0].size() == 4;
      /* FLAT/GLOBAL/SCRATCH store with >64-bit data */
      bool consider_flat =
         instr->isFlatLike() && instr->operands.size() == 3 && instr->operands[2].size() > 2;
      if (consider_buf || consider_mimg || consider_flat) {
         PhysReg wrdata = instr->operands[consider_flat ? 2 : 3].physReg();
         unsigned size = instr->operands[consider_flat ? 2 : 3].size();
         for (unsigned i = 0; i < size; i++)
            ctx.vmem_store_then_wr_data[(wrdata & 0xff) + i] = 1;
      }
   }
}

template <std::size_t N>
bool
check_written_regs(const aco_ptr<Instruction>& instr, const std::bitset<N>& check_regs)
{
   return std::any_of(instr->definitions.begin(), instr->definitions.end(),
                      [&check_regs](const Definition& def) -> bool
                      {
                         bool writes_any = false;
                         for (unsigned i = 0; i < def.size(); i++) {
                            unsigned def_reg = def.physReg() + i;
                            writes_any |= def_reg < check_regs.size() && check_regs[def_reg];
                         }
                         return writes_any;
                      });
}

template <std::size_t N>
void
mark_read_regs(const aco_ptr<Instruction>& instr, std::bitset<N>& reg_reads)
{
   for (const Operand& op : instr->operands) {
      for (unsigned i = 0; i < op.size(); i++) {
         unsigned reg = op.physReg() + i;
         if (reg < reg_reads.size())
            reg_reads.set(reg);
      }
   }
}

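/* VOPC implicitly writes VCC; VOP3 with two definitions writes a carry-out
 * SGPR pair; the readlane family writes a scalar destination. */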
bool
VALU_writes_sgpr(aco_ptr<Instruction>& instr)
{
   if (instr->isVOPC())
      return true;
   if (instr->isVOP3() && instr->definitions.size() == 2)
      return true;
   if (instr->opcode == aco_opcode::v_readfirstlane_b32 ||
       instr->opcode == aco_opcode::v_readlane_b32 ||
       instr->opcode == aco_opcode::v_readlane_b32_e64)
      return true;
   return false;
}

bool
instr_writes_exec(const aco_ptr<Instruction>& instr)
{
   return std::any_of(instr->definitions.begin(), instr->definitions.end(),
                      [](const Definition& def) -> bool
                      { return def.physReg() == exec_lo || def.physReg() == exec_hi; });
}

bool
instr_writes_sgpr(const aco_ptr<Instruction>& instr)
{
   return std::any_of(instr->definitions.begin(), instr->definitions.end(),
                      [](const Definition& def) -> bool
                      { return def.getTemp().type() == RegType::sgpr; });
}

inline bool
instr_is_branch(const aco_ptr<Instruction>& instr)
{
   return instr->opcode == aco_opcode::s_branch || instr->opcode == aco_opcode::s_cbranch_scc0 ||
          instr->opcode == aco_opcode::s_cbranch_scc1 ||
          instr->opcode == aco_opcode::s_cbranch_vccz ||
          instr->opcode == aco_opcode::s_cbranch_vccnz ||
          instr->opcode == aco_opcode::s_cbranch_execz ||
          instr->opcode == aco_opcode::s_cbranch_execnz ||
          instr->opcode == aco_opcode::s_cbranch_cdbgsys ||
          instr->opcode == aco_opcode::s_cbranch_cdbguser ||
          instr->opcode == aco_opcode::s_cbranch_cdbgsys_or_user ||
          instr->opcode == aco_opcode::s_cbranch_cdbgsys_and_user ||
          instr->opcode == aco_opcode::s_subvector_loop_begin ||
          instr->opcode == aco_opcode::s_subvector_loop_end ||
          instr->opcode == aco_opcode::s_setpc_b64 || instr->opcode == aco_opcode::s_swappc_b64 ||
          instr->opcode == aco_opcode::s_getpc_b64 || instr->opcode == aco_opcode::s_call_b64;
}

void
handle_instruction_gfx10(Program* program, Block* cur_block, NOP_ctx_gfx10& ctx,
                         aco_ptr<Instruction>& instr,
                         std::vector<aco_ptr<Instruction>>& new_instructions)
{
   // TODO: s_dcache_inv needs to be in its own group on GFX10

   /* VMEMtoScalarWriteHazard
    * Handle EXEC/M0/SGPR write following a VMEM instruction without a VALU or "waitcnt vmcnt(0)"
    * in-between.
    */
   if (instr->isVMEM() || instr->isFlatLike() || instr->isDS()) {
      /* Remember all SGPRs that are read by the VMEM instruction */
      mark_read_regs(instr, ctx.sgprs_read_by_VMEM);
      ctx.sgprs_read_by_VMEM.set(exec);
      if (program->wave_size == 64)
         ctx.sgprs_read_by_VMEM.set(exec_hi);
   } else if (instr->isSALU() || instr->isSMEM()) {
      if (instr->opcode == aco_opcode::s_waitcnt) {
         /* Hazard is mitigated by "s_waitcnt vmcnt(0)" */
         uint16_t imm = instr->sopp().imm;
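         /* GFX10 splits the 6-bit vmcnt field of the s_waitcnt imm: bits
          * [3:0] hold vmcnt[3:0] and bits [15:14] hold vmcnt[5:4] */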
         unsigned vmcnt = (imm & 0xF) | ((imm & (0x3 << 14)) >> 10);
         if (vmcnt == 0)
            ctx.sgprs_read_by_VMEM.reset();
      } else if (instr->opcode == aco_opcode::s_waitcnt_depctr) {
         /* Hazard is mitigated by an s_waitcnt_depctr with a magic imm */
         if (instr->sopp().imm == 0xffe3)
            ctx.sgprs_read_by_VMEM.reset();
      }

      /* Check if SALU writes an SGPR that was previously read by the VMEM */
      if (check_written_regs(instr, ctx.sgprs_read_by_VMEM)) {
         ctx.sgprs_read_by_VMEM.reset();

         /* Insert s_waitcnt_depctr instruction with magic imm to mitigate the problem */
         aco_ptr<SOPP_instruction> depctr{
            create_instruction<SOPP_instruction>(aco_opcode::s_waitcnt_depctr, Format::SOPP, 0, 0)};
         depctr->imm = 0xffe3;
         depctr->block = -1;
         new_instructions.emplace_back(std::move(depctr));
      }
   } else if (instr->isVALU()) {
      /* Hazard is mitigated by any VALU instruction */
      ctx.sgprs_read_by_VMEM.reset();
   }

   /* VcmpxPermlaneHazard
    * Handle any permlane following a VOPC instruction, insert v_mov between them.
    */
   if (instr->isVOPC()) {
      ctx.has_VOPC = true;
   } else if (ctx.has_VOPC && (instr->opcode == aco_opcode::v_permlane16_b32 ||
                               instr->opcode == aco_opcode::v_permlanex16_b32)) {
      ctx.has_VOPC = false;

      /* v_nop would be discarded by SQ, so use v_mov with the first operand of the permlane */
      aco_ptr<VOP1_instruction> v_mov{
         create_instruction<VOP1_instruction>(aco_opcode::v_mov_b32, Format::VOP1, 1, 1)};
      v_mov->definitions[0] = Definition(instr->operands[0].physReg(), v1);
      v_mov->operands[0] = Operand(instr->operands[0].physReg(), v1);
      new_instructions.emplace_back(std::move(v_mov));
   } else if (instr->isVALU() && instr->opcode != aco_opcode::v_nop) {
      ctx.has_VOPC = false;
   }

   /* VcmpxExecWARHazard
    * Handle any VALU instruction writing the exec mask after it was read by a non-VALU instruction.
    */
   if (!instr->isVALU() && instr->reads_exec()) {
      ctx.has_nonVALU_exec_read = true;
   } else if (instr->isVALU()) {
      if (instr_writes_exec(instr)) {
         ctx.has_nonVALU_exec_read = false;

         /* Insert s_waitcnt_depctr instruction with magic imm to mitigate the problem */
         aco_ptr<SOPP_instruction> depctr{
            create_instruction<SOPP_instruction>(aco_opcode::s_waitcnt_depctr, Format::SOPP, 0, 0)};
         depctr->imm = 0xfffe;
         depctr->block = -1;
         new_instructions.emplace_back(std::move(depctr));
      } else if (instr_writes_sgpr(instr)) {
         /* Any VALU instruction that writes an SGPR mitigates the problem */
         ctx.has_nonVALU_exec_read = false;
      }
   } else if (instr->opcode == aco_opcode::s_waitcnt_depctr) {
      /* s_waitcnt_depctr can mitigate the problem if it has a magic imm */
      if ((instr->sopp().imm & 0xfffe) == 0xfffe)
         ctx.has_nonVALU_exec_read = false;
   }

   /* SMEMtoVectorWriteHazard
    * Handle any VALU instruction writing an SGPR after an SMEM reads it.
    */
   if (instr->isSMEM()) {
      /* Remember all SGPRs that are read by the SMEM instruction */
      mark_read_regs(instr, ctx.sgprs_read_by_SMEM);
   } else if (VALU_writes_sgpr(instr)) {
      /* Check if VALU writes an SGPR that was previously read by SMEM */
      if (check_written_regs(instr, ctx.sgprs_read_by_SMEM)) {
         ctx.sgprs_read_by_SMEM.reset();

         /* Insert s_mov to mitigate the problem */
         aco_ptr<SOP1_instruction> s_mov{
            create_instruction<SOP1_instruction>(aco_opcode::s_mov_b32, Format::SOP1, 1, 1)};
         s_mov->definitions[0] = Definition(sgpr_null, s1);
         s_mov->operands[0] = Operand::zero();
         new_instructions.emplace_back(std::move(s_mov));
      }
   } else if (instr->isSALU()) {
      if (instr->format != Format::SOPP) {
         /* SALU can mitigate the hazard */
         ctx.sgprs_read_by_SMEM.reset();
      } else {
         /* Reducing lgkmcnt to 0 always mitigates the hazard. */
         const SOPP_instruction& sopp = instr->sopp();
         if (sopp.opcode == aco_opcode::s_waitcnt_lgkmcnt) {
            if (sopp.imm == 0 && sopp.definitions[0].physReg() == sgpr_null)
               ctx.sgprs_read_by_SMEM.reset();
         } else if (sopp.opcode == aco_opcode::s_waitcnt) {
            unsigned lgkm = (sopp.imm >> 8) & 0x3f;
            if (lgkm == 0)
               ctx.sgprs_read_by_SMEM.reset();
         }
      }
   }

   /* LdsBranchVmemWARHazard
    * Handle VMEM/GLOBAL/SCRATCH->branch->DS and DS->branch->VMEM/GLOBAL/SCRATCH patterns.
    */
   if (instr->isVMEM() || instr->isGlobal() || instr->isScratch()) {
      ctx.has_VMEM = true;
      ctx.has_branch_after_VMEM = false;
      /* Mitigation for DS is needed only if there was already a branch after */
      ctx.has_DS = ctx.has_branch_after_DS;
   } else if (instr->isDS()) {
      ctx.has_DS = true;
      ctx.has_branch_after_DS = false;
      /* Mitigation for VMEM is needed only if there was already a branch after */
      ctx.has_VMEM = ctx.has_branch_after_VMEM;
   } else if (instr_is_branch(instr)) {
      ctx.has_branch_after_VMEM = ctx.has_VMEM;
      ctx.has_branch_after_DS = ctx.has_DS;
   } else if (instr->opcode == aco_opcode::s_waitcnt_vscnt) {
      /* Only s_waitcnt_vscnt can mitigate the hazard */
      const SOPK_instruction& sopk = instr->sopk();
      if (sopk.definitions[0].physReg() == sgpr_null && sopk.imm == 0)
         ctx.has_VMEM = ctx.has_branch_after_VMEM = ctx.has_DS = ctx.has_branch_after_DS = false;
   }
   if ((ctx.has_VMEM && ctx.has_branch_after_DS) || (ctx.has_DS && ctx.has_branch_after_VMEM)) {
      ctx.has_VMEM = ctx.has_branch_after_VMEM = ctx.has_DS = ctx.has_branch_after_DS = false;

      /* Insert s_waitcnt_vscnt to mitigate the problem */
      aco_ptr<SOPK_instruction> wait{
         create_instruction<SOPK_instruction>(aco_opcode::s_waitcnt_vscnt, Format::SOPK, 0, 1)};
      wait->definitions[0] = Definition(sgpr_null, s1);
      wait->imm = 0;
      new_instructions.emplace_back(std::move(wait));
   }

   /* NSAToVMEMBug
    * Handles NSA MIMG (4 or more dwords) immediately followed by MUBUF/MTBUF (with offset[2:1] !=
    * 0).
    */
   if (instr->isMIMG() && get_mimg_nsa_dwords(instr.get()) > 1) {
      ctx.has_NSA_MIMG = true;
   } else if (ctx.has_NSA_MIMG) {
      ctx.has_NSA_MIMG = false;

      if (instr->isMUBUF() || instr->isMTBUF()) {
         uint32_t offset = instr->isMUBUF() ? instr->mubuf().offset : instr->mtbuf().offset;
         if (offset & 6)
            Builder(program, &new_instructions).sopp(aco_opcode::s_nop, -1, 0);
      }
   }

   /* waNsaCannotFollowWritelane
    * Handles NSA MIMG immediately following a v_writelane_b32.
    */
   if (instr->opcode == aco_opcode::v_writelane_b32_e64) {
      ctx.has_writelane = true;
   } else if (ctx.has_writelane) {
      ctx.has_writelane = false;
      if (instr->isMIMG() && get_mimg_nsa_dwords(instr.get()) > 0)
         Builder(program, &new_instructions).sopp(aco_opcode::s_nop, -1, 0);
   }
}

template <typename Ctx>
using HandleInstr = void (*)(Program*, Block* block, Ctx&, aco_ptr<Instruction>&,
                             std::vector<aco_ptr<Instruction>>&);

template <typename Ctx, HandleInstr<Ctx> Handle>
void
handle_block(Program* program, Ctx& ctx, Block& block)
{
   if (block.instructions.empty())
      return;

   std::vector<aco_ptr<Instruction>> old_instructions = std::move(block.instructions);

   block.instructions.clear(); // Silence clang-analyzer-cplusplus.Move warning
   block.instructions.reserve(old_instructions.size());

   for (aco_ptr<Instruction>& instr : old_instructions) {
      Handle(program, &block, ctx, instr, block.instructions);
      block.instructions.emplace_back(std::move(instr));
   }
}

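/* Hazard mitigation requires a fixed point across loops: when a loop exit is
 * reached, every block of the loop is processed again with the contexts of
 * its linear predecessors joined in, until the loop header's context stops
 * changing. */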
template <typename Ctx, HandleInstr<Ctx> Handle>
void
mitigate_hazards(Program* program)
{
   std::vector<Ctx> all_ctx(program->blocks.size());
   std::stack<unsigned> loop_header_indices;

   for (unsigned i = 0; i < program->blocks.size(); i++) {
      Block& block = program->blocks[i];
      Ctx& ctx = all_ctx[i];

      if (block.kind & block_kind_loop_header) {
         loop_header_indices.push(i);
      } else if (block.kind & block_kind_loop_exit) {
         /* Go through the whole loop again */
         for (unsigned idx = loop_header_indices.top(); idx < i; idx++) {
            Ctx loop_block_ctx;
            for (unsigned b : program->blocks[idx].linear_preds)
               loop_block_ctx.join(all_ctx[b]);

            handle_block<Ctx, Handle>(program, loop_block_ctx, program->blocks[idx]);

            /* We only need to continue if the loop header context changed */
            if (idx == loop_header_indices.top() && loop_block_ctx == all_ctx[idx])
               break;

            all_ctx[idx] = loop_block_ctx;
         }

         loop_header_indices.pop();
      }

      for (unsigned b : block.linear_preds)
         ctx.join(all_ctx[b]);

      handle_block<Ctx, Handle>(program, ctx, block);
   }
}

} /* end namespace */

void
insert_NOPs(Program* program)
{
   if (program->chip_class >= GFX10_3)
      ; /* no hazards/bugs to mitigate */
   else if (program->chip_class >= GFX10)
      mitigate_hazards<NOP_ctx_gfx10, handle_instruction_gfx10>(program);
   else
      mitigate_hazards<NOP_ctx_gfx6, handle_instruction_gfx6>(program);
}

} // namespace aco