GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/amd/compiler/aco_lower_to_hw_instr.cpp
/*
* Copyright © 2018 Valve Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/

#include "aco_builder.h"
#include "aco_ir.h"

#include "common/sid.h"

#include <map>
#include <vector>

namespace aco {
34
35
struct lower_context {
36
Program* program;
37
Block* block;
38
std::vector<aco_ptr<Instruction>> instructions;
39
};
40
41
/* used by handle_operands() indirectly through Builder::copy */
42
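/* For each 8-bit value i, entries [2*i] and [2*i+1] are two factors whose
* product has i in its low byte; both factors stay within inline-constant range
* once sign-extended. copy_constant() uses this to materialize 8-bit constants
* that would otherwise need a literal, via v_mul_u32_u24. */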
uint8_t int8_mul_table[512] = {
43
0, 20, 1, 1, 1, 2, 1, 3, 1, 4, 1, 5, 1, 6, 1, 7, 1, 8, 1, 9,
44
1, 10, 1, 11, 1, 12, 1, 13, 1, 14, 1, 15, 1, 16, 1, 17, 1, 18, 1, 19,
45
1, 20, 1, 21, 1, 22, 1, 23, 1, 24, 1, 25, 1, 26, 1, 27, 1, 28, 1, 29,
46
1, 30, 1, 31, 1, 32, 1, 33, 1, 34, 1, 35, 1, 36, 1, 37, 1, 38, 1, 39,
47
1, 40, 1, 41, 1, 42, 1, 43, 1, 44, 1, 45, 1, 46, 1, 47, 1, 48, 1, 49,
48
1, 50, 1, 51, 1, 52, 1, 53, 1, 54, 1, 55, 1, 56, 1, 57, 1, 58, 1, 59,
49
1, 60, 1, 61, 1, 62, 1, 63, 1, 64, 5, 13, 2, 33, 17, 19, 2, 34, 3, 23,
50
2, 35, 11, 53, 2, 36, 7, 47, 2, 37, 3, 25, 2, 38, 7, 11, 2, 39, 53, 243,
51
2, 40, 3, 27, 2, 41, 17, 35, 2, 42, 5, 17, 2, 43, 3, 29, 2, 44, 15, 23,
52
2, 45, 7, 13, 2, 46, 3, 31, 2, 47, 5, 19, 2, 48, 19, 59, 2, 49, 3, 33,
53
2, 50, 7, 51, 2, 51, 15, 41, 2, 52, 3, 35, 2, 53, 11, 33, 2, 54, 23, 27,
54
2, 55, 3, 37, 2, 56, 9, 41, 2, 57, 5, 23, 2, 58, 3, 39, 2, 59, 7, 17,
55
2, 60, 9, 241, 2, 61, 3, 41, 2, 62, 5, 25, 2, 63, 35, 245, 2, 64, 3, 43,
56
5, 26, 9, 43, 3, 44, 7, 19, 10, 39, 3, 45, 4, 34, 11, 59, 3, 46, 9, 243,
57
4, 35, 3, 47, 22, 53, 7, 57, 3, 48, 5, 29, 10, 245, 3, 49, 4, 37, 9, 45,
58
3, 50, 7, 241, 4, 38, 3, 51, 7, 22, 5, 31, 3, 52, 7, 59, 7, 242, 3, 53,
59
4, 40, 7, 23, 3, 54, 15, 45, 4, 41, 3, 55, 6, 241, 9, 47, 3, 56, 13, 13,
60
5, 34, 3, 57, 4, 43, 11, 39, 3, 58, 5, 35, 4, 44, 3, 59, 6, 243, 7, 245,
61
3, 60, 5, 241, 7, 26, 3, 61, 4, 46, 5, 37, 3, 62, 11, 17, 4, 47, 3, 63,
62
5, 38, 5, 243, 3, 64, 7, 247, 9, 50, 5, 39, 4, 241, 33, 37, 6, 33, 13, 35,
63
4, 242, 5, 245, 6, 247, 7, 29, 4, 51, 5, 41, 5, 246, 7, 249, 3, 240, 11, 19,
64
5, 42, 3, 241, 4, 245, 25, 29, 3, 242, 5, 43, 4, 246, 3, 243, 17, 58, 17, 43,
65
3, 244, 5, 249, 6, 37, 3, 245, 2, 240, 5, 45, 2, 241, 21, 23, 2, 242, 3, 247,
66
2, 243, 5, 251, 2, 244, 29, 61, 2, 245, 3, 249, 2, 246, 17, 29, 2, 247, 9, 55,
67
1, 240, 1, 241, 1, 242, 1, 243, 1, 244, 1, 245, 1, 246, 1, 247, 1, 248, 1, 249,
68
1, 250, 1, 251, 1, 252, 1, 253, 1, 254, 1, 255};
69
70
aco_opcode
71
get_reduce_opcode(chip_class chip, ReduceOp op)
72
{
73
/* Because some 16-bit instructions are already VOP3 on GFX10, we use the
* 32-bit opcodes (VOP2), which allows removing the temporary VGPR and using
* DPP with the arithmetic instructions. This requires the operands to be sign-extended.
*/
77
switch (op) {
78
case iadd8:
79
case iadd16:
80
if (chip >= GFX10) {
81
return aco_opcode::v_add_u32;
82
} else if (chip >= GFX8) {
83
return aco_opcode::v_add_u16;
84
} else {
85
return aco_opcode::v_add_co_u32;
86
}
87
break;
88
case imul8:
89
case imul16:
90
if (chip >= GFX10) {
91
return aco_opcode::v_mul_lo_u16_e64;
92
} else if (chip >= GFX8) {
93
return aco_opcode::v_mul_lo_u16;
94
} else {
95
return aco_opcode::v_mul_u32_u24;
96
}
97
break;
98
case fadd16: return aco_opcode::v_add_f16;
99
case fmul16: return aco_opcode::v_mul_f16;
100
case imax8:
101
case imax16:
102
if (chip >= GFX10) {
103
return aco_opcode::v_max_i32;
104
} else if (chip >= GFX8) {
105
return aco_opcode::v_max_i16;
106
} else {
107
return aco_opcode::v_max_i32;
108
}
109
break;
110
case imin8:
111
case imin16:
112
if (chip >= GFX10) {
113
return aco_opcode::v_min_i32;
114
} else if (chip >= GFX8) {
115
return aco_opcode::v_min_i16;
116
} else {
117
return aco_opcode::v_min_i32;
118
}
119
break;
120
case umin8:
121
case umin16:
122
if (chip >= GFX10) {
123
return aco_opcode::v_min_u32;
124
} else if (chip >= GFX8) {
125
return aco_opcode::v_min_u16;
126
} else {
127
return aco_opcode::v_min_u32;
128
}
129
break;
130
case umax8:
131
case umax16:
132
if (chip >= GFX10) {
133
return aco_opcode::v_max_u32;
134
} else if (chip >= GFX8) {
135
return aco_opcode::v_max_u16;
136
} else {
137
return aco_opcode::v_max_u32;
138
}
139
break;
140
case fmin16: return aco_opcode::v_min_f16;
141
case fmax16: return aco_opcode::v_max_f16;
142
case iadd32: return chip >= GFX9 ? aco_opcode::v_add_u32 : aco_opcode::v_add_co_u32;
143
case imul32: return aco_opcode::v_mul_lo_u32;
144
case fadd32: return aco_opcode::v_add_f32;
145
case fmul32: return aco_opcode::v_mul_f32;
146
case imax32: return aco_opcode::v_max_i32;
147
case imin32: return aco_opcode::v_min_i32;
148
case umin32: return aco_opcode::v_min_u32;
149
case umax32: return aco_opcode::v_max_u32;
150
case fmin32: return aco_opcode::v_min_f32;
151
case fmax32: return aco_opcode::v_max_f32;
152
case iand8:
153
case iand16:
154
case iand32: return aco_opcode::v_and_b32;
155
case ixor8:
156
case ixor16:
157
case ixor32: return aco_opcode::v_xor_b32;
158
case ior8:
159
case ior16:
160
case ior32: return aco_opcode::v_or_b32;
161
case iadd64: return aco_opcode::num_opcodes;
162
case imul64: return aco_opcode::num_opcodes;
163
case fadd64: return aco_opcode::v_add_f64;
164
case fmul64: return aco_opcode::v_mul_f64;
165
case imin64: return aco_opcode::num_opcodes;
166
case imax64: return aco_opcode::num_opcodes;
167
case umin64: return aco_opcode::num_opcodes;
168
case umax64: return aco_opcode::num_opcodes;
169
case fmin64: return aco_opcode::v_min_f64;
170
case fmax64: return aco_opcode::v_max_f64;
171
case iand64: return aco_opcode::num_opcodes;
172
case ior64: return aco_opcode::num_opcodes;
173
case ixor64: return aco_opcode::num_opcodes;
174
default: return aco_opcode::num_opcodes;
175
}
176
}
177
178
bool
179
is_vop3_reduce_opcode(aco_opcode opcode)
180
{
181
/* 64-bit reductions are VOP3. */
182
if (opcode == aco_opcode::num_opcodes)
183
return true;
184
185
return instr_info.format[(int)opcode] == Format::VOP3;
186
}
187
188
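/* Emits a 32-bit add; if the selected encoding produces a carry-out definition,
* it is fixed to vcc. */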
void
189
emit_vadd32(Builder& bld, Definition def, Operand src0, Operand src1)
190
{
191
Instruction* instr = bld.vadd32(def, src0, src1, false, Operand(s2), true);
192
if (instr->definitions.size() >= 2) {
193
assert(instr->definitions[1].regClass() == bld.lm);
194
instr->definitions[1].setFixed(vcc);
195
}
196
}
197
198
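/* One 64-bit reduction step where src0 is read through DPP. Ops without a native
* 64-bit VALU instruction are expanded on the two 32-bit halves: add with carry,
* min/max via compare+select, and mul via 32x32-bit partial products. */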
void
199
emit_int64_dpp_op(lower_context* ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
200
PhysReg vtmp_reg, ReduceOp op, unsigned dpp_ctrl, unsigned row_mask,
201
unsigned bank_mask, bool bound_ctrl, Operand* identity = NULL)
202
{
203
Builder bld(ctx->program, &ctx->instructions);
204
Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg + 1}, v1)};
205
Definition vtmp_def[] = {Definition(vtmp_reg, v1), Definition(PhysReg{vtmp_reg + 1}, v1)};
206
Operand src0[] = {Operand(src0_reg, v1), Operand(PhysReg{src0_reg + 1}, v1)};
207
Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg + 1}, v1)};
208
Operand src1_64 = Operand(src1_reg, v2);
209
Operand vtmp_op[] = {Operand(vtmp_reg, v1), Operand(PhysReg{vtmp_reg + 1}, v1)};
210
Operand vtmp_op64 = Operand(vtmp_reg, v2);
211
if (op == iadd64) {
212
if (ctx->program->chip_class >= GFX10) {
213
if (identity)
214
bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
215
bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0], dpp_ctrl, row_mask, bank_mask,
216
bound_ctrl);
217
bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), vtmp_op[0], src1[0]);
218
} else {
219
bld.vop2_dpp(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0],
220
dpp_ctrl, row_mask, bank_mask, bound_ctrl);
221
}
222
bld.vop2_dpp(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1],
223
Operand(vcc, bld.lm), dpp_ctrl, row_mask, bank_mask, bound_ctrl);
224
} else if (op == iand64) {
225
bld.vop2_dpp(aco_opcode::v_and_b32, dst[0], src0[0], src1[0], dpp_ctrl, row_mask, bank_mask,
226
bound_ctrl);
227
bld.vop2_dpp(aco_opcode::v_and_b32, dst[1], src0[1], src1[1], dpp_ctrl, row_mask, bank_mask,
228
bound_ctrl);
229
} else if (op == ior64) {
230
bld.vop2_dpp(aco_opcode::v_or_b32, dst[0], src0[0], src1[0], dpp_ctrl, row_mask, bank_mask,
231
bound_ctrl);
232
bld.vop2_dpp(aco_opcode::v_or_b32, dst[1], src0[1], src1[1], dpp_ctrl, row_mask, bank_mask,
233
bound_ctrl);
234
} else if (op == ixor64) {
235
bld.vop2_dpp(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0], dpp_ctrl, row_mask, bank_mask,
236
bound_ctrl);
237
bld.vop2_dpp(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1], dpp_ctrl, row_mask, bank_mask,
238
bound_ctrl);
239
} else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
240
aco_opcode cmp = aco_opcode::num_opcodes;
241
switch (op) {
242
case umin64: cmp = aco_opcode::v_cmp_gt_u64; break;
243
case umax64: cmp = aco_opcode::v_cmp_lt_u64; break;
244
case imin64: cmp = aco_opcode::v_cmp_gt_i64; break;
245
case imax64: cmp = aco_opcode::v_cmp_lt_i64; break;
246
default: break;
247
}
248
249
if (identity) {
250
bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
251
bld.vop1(aco_opcode::v_mov_b32, vtmp_def[1], identity[1]);
252
}
253
bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0], dpp_ctrl, row_mask, bank_mask,
254
bound_ctrl);
255
bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[1], src0[1], dpp_ctrl, row_mask, bank_mask,
256
bound_ctrl);
257
258
bld.vopc(cmp, bld.def(bld.lm, vcc), vtmp_op64, src1_64);
259
bld.vop2(aco_opcode::v_cndmask_b32, dst[0], vtmp_op[0], src1[0], Operand(vcc, bld.lm));
260
bld.vop2(aco_opcode::v_cndmask_b32, dst[1], vtmp_op[1], src1[1], Operand(vcc, bld.lm));
261
} else if (op == imul64) {
262
/* t4 = dpp(x_hi)
263
* t1 = umul_lo(t4, y_lo)
264
* t3 = dpp(x_lo)
265
* t0 = umul_lo(t3, y_hi)
266
* t2 = iadd(t0, t1)
267
* t5 = umul_hi(t3, y_lo)
268
* res_hi = iadd(t2, t5)
269
* res_lo = umul_lo(t3, y_lo)
270
* Requires that res_hi != src0[0] and res_hi != src1[0]
271
* and that vtmp[0] != res_hi.
272
*/
273
if (identity)
274
bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[1]);
275
bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[1], dpp_ctrl, row_mask, bank_mask,
276
bound_ctrl);
277
bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[1], vtmp_op[0], src1[0]);
278
if (identity)
279
bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
280
bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0], dpp_ctrl, row_mask, bank_mask,
281
bound_ctrl);
282
bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[0], vtmp_op[0], src1[1]);
283
emit_vadd32(bld, vtmp_def[1], vtmp_op[0], vtmp_op[1]);
284
if (identity)
285
bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
286
bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0], dpp_ctrl, row_mask, bank_mask,
287
bound_ctrl);
288
bld.vop3(aco_opcode::v_mul_hi_u32, vtmp_def[0], vtmp_op[0], src1[0]);
289
emit_vadd32(bld, dst[1], vtmp_op[1], vtmp_op[0]);
290
if (identity)
291
bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
292
bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0], dpp_ctrl, row_mask, bank_mask,
293
bound_ctrl);
294
bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], vtmp_op[0], src1[0]);
295
}
296
}
297
298
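/* Non-DPP counterpart of emit_int64_dpp_op. Also handles an SGPR first operand
* by copying it (or just its high half for iadd64) into vtmp first. */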
void
299
emit_int64_op(lower_context* ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp,
300
ReduceOp op)
301
{
302
Builder bld(ctx->program, &ctx->instructions);
303
Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg + 1}, v1)};
304
RegClass src0_rc = src0_reg.reg() >= 256 ? v1 : s1;
305
Operand src0[] = {Operand(src0_reg, src0_rc), Operand(PhysReg{src0_reg + 1}, src0_rc)};
306
Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg + 1}, v1)};
307
Operand src0_64 = Operand(src0_reg, src0_reg.reg() >= 256 ? v2 : s2);
308
Operand src1_64 = Operand(src1_reg, v2);
309
310
if (src0_rc == s1 &&
311
(op == imul64 || op == umin64 || op == umax64 || op == imin64 || op == imax64)) {
312
assert(vtmp.reg() != 0);
313
bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), src0[0]);
314
bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp + 1}, v1), src0[1]);
315
src0_reg = vtmp;
316
src0[0] = Operand(vtmp, v1);
317
src0[1] = Operand(PhysReg{vtmp + 1}, v1);
318
src0_64 = Operand(vtmp, v2);
319
} else if (src0_rc == s1 && op == iadd64) {
320
assert(vtmp.reg() != 0);
321
bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp + 1}, v1), src0[1]);
322
src0[1] = Operand(PhysReg{vtmp + 1}, v1);
323
}
324
325
if (op == iadd64) {
326
if (ctx->program->chip_class >= GFX10) {
327
bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
328
} else {
329
bld.vop2(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
330
}
331
bld.vop2(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1],
332
Operand(vcc, bld.lm));
333
} else if (op == iand64) {
334
bld.vop2(aco_opcode::v_and_b32, dst[0], src0[0], src1[0]);
335
bld.vop2(aco_opcode::v_and_b32, dst[1], src0[1], src1[1]);
336
} else if (op == ior64) {
337
bld.vop2(aco_opcode::v_or_b32, dst[0], src0[0], src1[0]);
338
bld.vop2(aco_opcode::v_or_b32, dst[1], src0[1], src1[1]);
339
} else if (op == ixor64) {
340
bld.vop2(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0]);
341
bld.vop2(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1]);
342
} else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
343
aco_opcode cmp = aco_opcode::num_opcodes;
344
switch (op) {
345
case umin64: cmp = aco_opcode::v_cmp_gt_u64; break;
346
case umax64: cmp = aco_opcode::v_cmp_lt_u64; break;
347
case imin64: cmp = aco_opcode::v_cmp_gt_i64; break;
348
case imax64: cmp = aco_opcode::v_cmp_lt_i64; break;
349
default: break;
350
}
351
352
bld.vopc(cmp, bld.def(bld.lm, vcc), src0_64, src1_64);
353
bld.vop2(aco_opcode::v_cndmask_b32, dst[0], src0[0], src1[0], Operand(vcc, bld.lm));
354
bld.vop2(aco_opcode::v_cndmask_b32, dst[1], src0[1], src1[1], Operand(vcc, bld.lm));
355
} else if (op == imul64) {
356
if (src1_reg == dst_reg) {
357
/* it's fine if src0==dst but not if src1==dst */
358
std::swap(src0_reg, src1_reg);
359
std::swap(src0[0], src1[0]);
360
std::swap(src0[1], src1[1]);
361
std::swap(src0_64, src1_64);
362
}
363
assert(!(src0_reg == src1_reg));
364
/* t1 = umul_lo(x_hi, y_lo)
365
* t0 = umul_lo(x_lo, y_hi)
366
* t2 = iadd(t0, t1)
367
* t5 = umul_hi(x_lo, y_lo)
368
* res_hi = iadd(t2, t5)
369
* res_lo = umul_lo(x_lo, y_lo)
370
* assumes that it's ok to modify x_hi/y_hi, since we might not have vtmp
371
*/
372
Definition tmp0_def(PhysReg{src0_reg + 1}, v1);
373
Definition tmp1_def(PhysReg{src1_reg + 1}, v1);
374
Operand tmp0_op = src0[1];
375
Operand tmp1_op = src1[1];
376
bld.vop3(aco_opcode::v_mul_lo_u32, tmp0_def, src0[1], src1[0]);
377
bld.vop3(aco_opcode::v_mul_lo_u32, tmp1_def, src0[0], src1[1]);
378
emit_vadd32(bld, tmp0_def, tmp1_op, tmp0_op);
379
bld.vop3(aco_opcode::v_mul_hi_u32, tmp1_def, src0[0], src1[0]);
380
emit_vadd32(bld, dst[1], tmp0_op, tmp1_op);
381
bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], src0[0], src1[0]);
382
}
383
}
384
385
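/* One reduction step where src0 is read through DPP. VOP2-encodable ops use DPP
* directly; VOP3 ops first copy src0 into vtmp with a DPP mov (VOP3 cannot use
* DPP), and 64-bit ops without a native opcode go through emit_int64_dpp_op. */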
void
386
emit_dpp_op(lower_context* ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp,
387
ReduceOp op, unsigned size, unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask,
388
bool bound_ctrl, Operand* identity = NULL) /* for VOP3 with sparse writes */
389
{
390
Builder bld(ctx->program, &ctx->instructions);
391
RegClass rc = RegClass(RegType::vgpr, size);
392
Definition dst(dst_reg, rc);
393
Operand src0(src0_reg, rc);
394
Operand src1(src1_reg, rc);
395
396
aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
397
bool vop3 = is_vop3_reduce_opcode(opcode);
398
399
if (!vop3) {
400
if (opcode == aco_opcode::v_add_co_u32)
401
bld.vop2_dpp(opcode, dst, bld.def(bld.lm, vcc), src0, src1, dpp_ctrl, row_mask, bank_mask,
402
bound_ctrl);
403
else
404
bld.vop2_dpp(opcode, dst, src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
405
return;
406
}
407
408
if (opcode == aco_opcode::num_opcodes) {
409
emit_int64_dpp_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op, dpp_ctrl, row_mask, bank_mask,
410
bound_ctrl, identity);
411
return;
412
}
413
414
if (identity)
415
bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), identity[0]);
416
if (identity && size >= 2)
417
bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp + 1}, v1), identity[1]);
418
419
for (unsigned i = 0; i < size; i++)
420
bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp + i}, v1),
421
Operand(PhysReg{src0_reg + i}, v1), dpp_ctrl, row_mask, bank_mask, bound_ctrl);
422
423
bld.vop3(opcode, dst, Operand(vtmp, rc), src1);
424
}
425
426
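/* One reduction step without DPP, choosing the VOP2/VOP3 encoding as needed. */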
void
427
emit_op(lower_context* ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp,
428
ReduceOp op, unsigned size)
429
{
430
Builder bld(ctx->program, &ctx->instructions);
431
RegClass rc = RegClass(RegType::vgpr, size);
432
Definition dst(dst_reg, rc);
433
Operand src0(src0_reg, RegClass(src0_reg.reg() >= 256 ? RegType::vgpr : RegType::sgpr, size));
434
Operand src1(src1_reg, rc);
435
436
aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
437
bool vop3 = is_vop3_reduce_opcode(opcode);
438
439
if (opcode == aco_opcode::num_opcodes) {
440
emit_int64_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op);
441
return;
442
}
443
444
if (vop3) {
445
bld.vop3(opcode, dst, src0, src1);
446
} else if (opcode == aco_opcode::v_add_co_u32) {
447
bld.vop2(opcode, dst, bld.def(bld.lm, vcc), src0, src1);
448
} else {
449
bld.vop2(opcode, dst, src0, src1);
450
}
451
}
452
453
void
454
emit_dpp_mov(lower_context* ctx, PhysReg dst, PhysReg src0, unsigned size, unsigned dpp_ctrl,
455
unsigned row_mask, unsigned bank_mask, bool bound_ctrl)
456
{
457
Builder bld(ctx->program, &ctx->instructions);
458
for (unsigned i = 0; i < size; i++) {
459
bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{dst + i}, v1),
460
Operand(PhysReg{src0 + i}, v1), dpp_ctrl, row_mask, bank_mask, bound_ctrl);
461
}
462
}
463
464
void
465
emit_ds_swizzle(Builder bld, PhysReg dst, PhysReg src, unsigned size, unsigned ds_pattern)
466
{
467
for (unsigned i = 0; i < size; i++) {
468
bld.ds(aco_opcode::ds_swizzle_b32, Definition(PhysReg{dst + i}, v1),
469
Operand(PhysReg{src + i}, v1), ds_pattern);
470
}
471
}
472
473
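/* Lowers p_reduce, p_inclusive_scan and p_exclusive_scan: inactive lanes are
* first set to the operation's identity, the values are then combined across
* lanes with DPP permutes (ds_swizzle on GFX6/GFX7, v_permlanex16/readlane on
* GFX10+ where row_bcast is unavailable), and finally exec is restored and the
* result is written to the destination registers. */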
void
474
emit_reduction(lower_context* ctx, aco_opcode op, ReduceOp reduce_op, unsigned cluster_size,
475
PhysReg tmp, PhysReg stmp, PhysReg vtmp, PhysReg sitmp, Operand src, Definition dst)
476
{
477
assert(cluster_size == ctx->program->wave_size || op == aco_opcode::p_reduce);
478
assert(cluster_size <= ctx->program->wave_size);
479
480
Builder bld(ctx->program, &ctx->instructions);
481
482
Operand identity[2];
483
identity[0] = Operand::c32(get_reduction_identity(reduce_op, 0));
484
identity[1] = Operand::c32(get_reduction_identity(reduce_op, 1));
485
Operand vcndmask_identity[2] = {identity[0], identity[1]};
486
487
/* First, copy the source to tmp and set inactive lanes to the identity */
488
bld.sop1(Builder::s_or_saveexec, Definition(stmp, bld.lm), Definition(scc, s1),
489
Definition(exec, bld.lm), Operand::c64(UINT64_MAX), Operand(exec, bld.lm));
490
491
for (unsigned i = 0; i < src.size(); i++) {
492
/* p_exclusive_scan needs it to be an SGPR or inline constant for v_writelane_b32,
* except on GFX10, where v_writelane_b32 can take a literal. */
494
if (identity[i].isLiteral() && op == aco_opcode::p_exclusive_scan &&
495
ctx->program->chip_class < GFX10) {
496
bld.sop1(aco_opcode::s_mov_b32, Definition(PhysReg{sitmp + i}, s1), identity[i]);
497
identity[i] = Operand(PhysReg{sitmp + i}, s1);
498
499
bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp + i}, v1), identity[i]);
500
vcndmask_identity[i] = Operand(PhysReg{tmp + i}, v1);
501
} else if (identity[i].isLiteral()) {
502
bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp + i}, v1), identity[i]);
503
vcndmask_identity[i] = Operand(PhysReg{tmp + i}, v1);
504
}
505
}
506
507
for (unsigned i = 0; i < src.size(); i++) {
508
bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(PhysReg{tmp + i}, v1),
509
vcndmask_identity[i], Operand(PhysReg{src.physReg() + i}, v1),
510
Operand(stmp, bld.lm));
511
}
512
513
if (src.regClass() == v1b) {
514
if (ctx->program->chip_class >= GFX8) {
515
aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(
516
aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
517
sdwa->operands[0] = Operand(PhysReg{tmp}, v1);
518
sdwa->definitions[0] = Definition(PhysReg{tmp}, v1);
519
if (reduce_op == imin8 || reduce_op == imax8)
520
sdwa->sel[0] = sdwa_sbyte;
521
else
522
sdwa->sel[0] = sdwa_ubyte;
523
sdwa->dst_sel = sdwa_udword;
524
bld.insert(std::move(sdwa));
525
} else {
526
aco_opcode opcode;
527
528
if (reduce_op == imin8 || reduce_op == imax8)
529
opcode = aco_opcode::v_bfe_i32;
530
else
531
opcode = aco_opcode::v_bfe_u32;
532
533
bld.vop3(opcode, Definition(PhysReg{tmp}, v1), Operand(PhysReg{tmp}, v1), Operand::zero(),
534
Operand::c32(8u));
535
}
536
} else if (src.regClass() == v2b) {
537
if (ctx->program->chip_class >= GFX10 &&
538
(reduce_op == iadd16 || reduce_op == imax16 || reduce_op == imin16 ||
539
reduce_op == umin16 || reduce_op == umax16)) {
540
aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(
541
aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
542
sdwa->operands[0] = Operand(PhysReg{tmp}, v1);
543
sdwa->definitions[0] = Definition(PhysReg{tmp}, v1);
544
if (reduce_op == imin16 || reduce_op == imax16 || reduce_op == iadd16)
545
sdwa->sel[0] = sdwa_sword;
546
else
547
sdwa->sel[0] = sdwa_uword;
548
sdwa->dst_sel = sdwa_udword;
549
bld.insert(std::move(sdwa));
550
} else if (ctx->program->chip_class == GFX6 || ctx->program->chip_class == GFX7) {
551
aco_opcode opcode;
552
553
if (reduce_op == imin16 || reduce_op == imax16 || reduce_op == iadd16)
554
opcode = aco_opcode::v_bfe_i32;
555
else
556
opcode = aco_opcode::v_bfe_u32;
557
558
bld.vop3(opcode, Definition(PhysReg{tmp}, v1), Operand(PhysReg{tmp}, v1), Operand::zero(),
559
Operand::c32(16u));
560
}
561
}
562
563
bool reduction_needs_last_op = false;
564
switch (op) {
565
case aco_opcode::p_reduce:
566
if (cluster_size == 1)
567
break;
568
569
if (ctx->program->chip_class <= GFX7) {
570
reduction_needs_last_op = true;
571
emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(1, 0, 3, 2));
572
if (cluster_size == 2)
573
break;
574
emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
575
emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(2, 3, 0, 1));
576
if (cluster_size == 4)
577
break;
578
emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
579
emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x04));
580
if (cluster_size == 8)
581
break;
582
emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
583
emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x08));
584
if (cluster_size == 16)
585
break;
586
emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
587
emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
588
if (cluster_size == 32)
589
break;
590
emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
591
for (unsigned i = 0; i < src.size(); i++)
592
bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp + i}, v1),
593
Operand::zero());
594
// TODO: it would be more efficient to do the last reduction step on SALU
595
emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
596
reduction_needs_last_op = false;
597
break;
598
}
599
600
emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(1, 0, 3, 2), 0xf,
601
0xf, false);
602
if (cluster_size == 2)
603
break;
604
emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(2, 3, 0, 1), 0xf,
605
0xf, false);
606
if (cluster_size == 4)
607
break;
608
emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_half_mirror, 0xf, 0xf,
609
false);
610
if (cluster_size == 8)
611
break;
612
emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_mirror, 0xf, 0xf, false);
613
if (cluster_size == 16)
614
break;
615
616
if (ctx->program->chip_class >= GFX10) {
617
/* GFX10+ doesn't support row_bcast15 and row_bcast31 */
618
for (unsigned i = 0; i < src.size(); i++)
619
bld.vop3(aco_opcode::v_permlanex16_b32, Definition(PhysReg{vtmp + i}, v1),
620
Operand(PhysReg{tmp + i}, v1), Operand::zero(), Operand::zero());
621
622
if (cluster_size == 32) {
623
reduction_needs_last_op = true;
624
break;
625
}
626
627
emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
628
for (unsigned i = 0; i < src.size(); i++)
629
bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp + i}, v1),
630
Operand::zero());
631
// TODO: it would be more efficient to do the last reduction step on SALU
632
emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
633
break;
634
}
635
636
if (cluster_size == 32) {
637
emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
638
reduction_needs_last_op = true;
639
break;
640
}
641
assert(cluster_size == 64);
642
emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast15, 0xa, 0xf,
643
false);
644
emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast31, 0xc, 0xf,
645
false);
646
break;
647
case aco_opcode::p_exclusive_scan:
648
if (ctx->program->chip_class >= GFX10) { /* gfx10 doesn't support wf_sr1, so emulate it */
649
/* shift rows right */
650
emit_dpp_mov(ctx, vtmp, tmp, src.size(), dpp_row_sr(1), 0xf, 0xf, true);
651
652
/* fill in the gaps in rows 1 and 3 */
653
bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand::c32(0x10000u));
654
bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand::c32(0x10000u));
655
for (unsigned i = 0; i < src.size(); i++) {
656
Instruction* perm =
657
bld.vop3(aco_opcode::v_permlanex16_b32, Definition(PhysReg{vtmp + i}, v1),
658
Operand(PhysReg{tmp + i}, v1), Operand::c32(0xffffffffu),
659
Operand::c32(0xffffffffu))
660
.instr;
661
perm->vop3().opsel = 1; /* FI (Fetch Inactive) */
662
}
663
bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand::c64(UINT64_MAX));
664
665
if (ctx->program->wave_size == 64) {
666
/* fill in the gap in row 2 */
667
for (unsigned i = 0; i < src.size(); i++) {
668
bld.readlane(Definition(PhysReg{sitmp + i}, s1), Operand(PhysReg{tmp + i}, v1),
669
Operand::c32(31u));
670
bld.writelane(Definition(PhysReg{vtmp + i}, v1), Operand(PhysReg{sitmp + i}, s1),
671
Operand::c32(32u), Operand(PhysReg{vtmp + i}, v1));
672
}
673
}
674
std::swap(tmp, vtmp);
675
} else if (ctx->program->chip_class >= GFX8) {
676
emit_dpp_mov(ctx, tmp, tmp, src.size(), dpp_wf_sr1, 0xf, 0xf, true);
677
} else {
678
// TODO: use LDS on CS with a single write and shifted read
679
/* wavefront shift_right by 1 on SI/CI */
680
emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(0, 0, 1, 2));
681
emit_ds_swizzle(bld, tmp, tmp, src.size(),
682
ds_pattern_bitmode(0x1F, 0x00, 0x07)); /* mirror(8) */
683
bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand::c32(0x10101010u));
684
bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
685
for (unsigned i = 0; i < src.size(); i++)
686
bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp + i}, v1),
687
Operand(PhysReg{tmp + i}, v1));
688
689
bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand::c64(UINT64_MAX));
690
emit_ds_swizzle(bld, tmp, tmp, src.size(),
691
ds_pattern_bitmode(0x1F, 0x00, 0x08)); /* swap(8) */
692
bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand::c32(0x01000100u));
693
bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
694
for (unsigned i = 0; i < src.size(); i++)
695
bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp + i}, v1),
696
Operand(PhysReg{tmp + i}, v1));
697
698
bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand::c64(UINT64_MAX));
699
emit_ds_swizzle(bld, tmp, tmp, src.size(),
700
ds_pattern_bitmode(0x1F, 0x00, 0x10)); /* swap(16) */
701
bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand::c32(1u),
702
Operand::c32(16u));
703
bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand::c32(1u),
704
Operand::c32(16u));
705
for (unsigned i = 0; i < src.size(); i++)
706
bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp + i}, v1),
707
Operand(PhysReg{tmp + i}, v1));
708
709
bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand::c64(UINT64_MAX));
710
for (unsigned i = 0; i < src.size(); i++) {
711
bld.writelane(Definition(PhysReg{vtmp + i}, v1), identity[i], Operand::zero(),
712
Operand(PhysReg{vtmp + i}, v1));
713
bld.readlane(Definition(PhysReg{sitmp + i}, s1), Operand(PhysReg{tmp + i}, v1),
714
Operand::zero());
715
bld.writelane(Definition(PhysReg{vtmp + i}, v1), Operand(PhysReg{sitmp + i}, s1),
716
Operand::c32(32u), Operand(PhysReg{vtmp + i}, v1));
717
identity[i] = Operand::zero(); /* prevent further uses of identity */
718
}
719
std::swap(tmp, vtmp);
720
}
721
722
for (unsigned i = 0; i < src.size(); i++) {
723
if (!identity[i].isConstant() ||
724
identity[i].constantValue()) { /* bound_ctrl should take care of this otherwise */
725
if (ctx->program->chip_class < GFX10)
726
assert((identity[i].isConstant() && !identity[i].isLiteral()) ||
727
identity[i].physReg() == PhysReg{sitmp + i});
728
bld.writelane(Definition(PhysReg{tmp + i}, v1), identity[i], Operand::zero(),
729
Operand(PhysReg{tmp + i}, v1));
730
}
731
}
732
FALLTHROUGH;
733
case aco_opcode::p_inclusive_scan:
734
assert(cluster_size == ctx->program->wave_size);
735
if (ctx->program->chip_class <= GFX7) {
736
emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1e, 0x00, 0x00));
737
bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand::c32(0xAAAAAAAAu));
738
bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
739
emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
740
741
bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand::c64(UINT64_MAX));
742
emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1c, 0x01, 0x00));
743
bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand::c32(0xCCCCCCCCu));
744
bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
745
emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
746
747
bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand::c64(UINT64_MAX));
748
emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x18, 0x03, 0x00));
749
bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand::c32(0xF0F0F0F0u));
750
bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
751
emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
752
753
bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand::c64(UINT64_MAX));
754
emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x10, 0x07, 0x00));
755
bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand::c32(0xFF00FF00u));
756
bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
757
emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
758
759
bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand::c64(UINT64_MAX));
760
emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x00, 0x0f, 0x00));
761
bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand::c32(16u),
762
Operand::c32(16u));
763
bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand::c32(16u),
764
Operand::c32(16u));
765
emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
766
767
for (unsigned i = 0; i < src.size(); i++)
768
bld.readlane(Definition(PhysReg{sitmp + i}, s1), Operand(PhysReg{tmp + i}, v1),
769
Operand::c32(31u));
770
bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand::c32(32u),
771
Operand::c32(32u));
772
emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
773
break;
774
}
775
776
emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_sr(1), 0xf, 0xf, false,
777
identity);
778
emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_sr(2), 0xf, 0xf, false,
779
identity);
780
emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_sr(4), 0xf, 0xf, false,
781
identity);
782
emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_sr(8), 0xf, 0xf, false,
783
identity);
784
if (ctx->program->chip_class >= GFX10) {
785
bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand::c32(16u),
786
Operand::c32(16u));
787
bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand::c32(16u),
788
Operand::c32(16u));
789
for (unsigned i = 0; i < src.size(); i++) {
790
Instruction* perm =
791
bld.vop3(aco_opcode::v_permlanex16_b32, Definition(PhysReg{vtmp + i}, v1),
792
Operand(PhysReg{tmp + i}, v1), Operand::c32(0xffffffffu),
793
Operand::c32(0xffffffffu))
794
.instr;
795
perm->vop3().opsel = 1; /* FI (Fetch Inactive) */
796
}
797
emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
798
799
if (ctx->program->wave_size == 64) {
800
bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand::c32(32u),
801
Operand::c32(32u));
802
for (unsigned i = 0; i < src.size(); i++)
803
bld.readlane(Definition(PhysReg{sitmp + i}, s1), Operand(PhysReg{tmp + i}, v1),
804
Operand::c32(31u));
805
emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
806
}
807
} else {
808
emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast15, 0xa, 0xf,
809
false, identity);
810
emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast31, 0xc, 0xf,
811
false, identity);
812
}
813
break;
814
default: unreachable("Invalid reduction mode");
815
}
816
817
if (op == aco_opcode::p_reduce) {
818
if (reduction_needs_last_op && dst.regClass().type() == RegType::vgpr) {
819
bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));
820
emit_op(ctx, dst.physReg(), tmp, vtmp, PhysReg{0}, reduce_op, src.size());
821
return;
822
}
823
824
if (reduction_needs_last_op)
825
emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
826
}
827
828
/* restore exec */
829
bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));
830
831
if (dst.regClass().type() == RegType::sgpr) {
832
for (unsigned k = 0; k < src.size(); k++) {
833
bld.readlane(Definition(PhysReg{dst.physReg() + k}, s1), Operand(PhysReg{tmp + k}, v1),
834
Operand::c32(ctx->program->wave_size - 1));
835
}
836
} else if (dst.physReg() != tmp) {
837
for (unsigned k = 0; k < src.size(); k++) {
838
bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{dst.physReg() + k}, v1),
839
Operand(PhysReg{tmp + k}, v1));
840
}
841
}
842
}
843
844
void
845
emit_gfx10_wave64_bpermute(Program* program, aco_ptr<Instruction>& instr, Builder& bld)
846
{
847
/* Emulates proper bpermute on GFX10 in wave64 mode.
848
*
849
* This is necessary because on GFX10 the bpermute instruction only works
850
* on half waves (you can think of it as having a cluster size of 32), so we
851
* manually swap the data between the two halves using two shared VGPRs.
852
*/
853
854
assert(program->chip_class >= GFX10);
855
assert(program->wave_size == 64);
856
857
unsigned shared_vgpr_reg_0 = align(program->config->num_vgprs, 4) + 256;
858
Definition dst = instr->definitions[0];
859
Definition tmp_exec = instr->definitions[1];
860
Definition clobber_scc = instr->definitions[2];
861
Operand index_x4 = instr->operands[0];
862
Operand input_data = instr->operands[1];
863
Operand same_half = instr->operands[2];
864
865
assert(dst.regClass() == v1);
866
assert(tmp_exec.regClass() == bld.lm);
867
assert(clobber_scc.isFixed() && clobber_scc.physReg() == scc);
868
assert(same_half.regClass() == bld.lm);
869
assert(index_x4.regClass() == v1);
870
assert(input_data.regClass().type() == RegType::vgpr);
871
assert(input_data.bytes() <= 4);
872
assert(dst.physReg() != index_x4.physReg());
873
assert(dst.physReg() != input_data.physReg());
874
assert(tmp_exec.physReg() != same_half.physReg());
875
876
PhysReg shared_vgpr_lo(shared_vgpr_reg_0);
877
PhysReg shared_vgpr_hi(shared_vgpr_reg_0 + 1);
878
879
/* Permute the input within the same half-wave */
880
bld.ds(aco_opcode::ds_bpermute_b32, dst, index_x4, input_data);
881
882
/* HI: Copy data from high lanes 32-63 to shared vgpr */
883
bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(shared_vgpr_hi, v1), input_data,
884
dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);
885
/* Save EXEC */
886
bld.sop1(aco_opcode::s_mov_b64, tmp_exec, Operand(exec, s2));
887
/* Set EXEC to enable LO lanes only */
888
bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand::c32(32u), Operand::zero());
889
/* LO: Copy data from low lanes 0-31 to shared vgpr */
890
bld.vop1(aco_opcode::v_mov_b32, Definition(shared_vgpr_lo, v1), input_data);
891
/* LO: bpermute shared vgpr (high lanes' data) */
892
bld.ds(aco_opcode::ds_bpermute_b32, Definition(shared_vgpr_hi, v1), index_x4,
893
Operand(shared_vgpr_hi, v1));
894
/* Set EXEC to enable HI lanes only */
895
bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand::c32(32u), Operand::c32(32u));
896
/* HI: bpermute shared vgpr (low lanes' data) */
897
bld.ds(aco_opcode::ds_bpermute_b32, Definition(shared_vgpr_lo, v1), index_x4,
898
Operand(shared_vgpr_lo, v1));
899
900
/* Only enable lanes which use the other half's data */
901
bld.sop2(aco_opcode::s_andn2_b64, Definition(exec, s2), clobber_scc,
902
Operand(tmp_exec.physReg(), s2), same_half);
903
/* LO: Copy shared vgpr (high lanes' bpermuted data) to output vgpr */
904
bld.vop1_dpp(aco_opcode::v_mov_b32, dst, Operand(shared_vgpr_hi, v1), dpp_quad_perm(0, 1, 2, 3),
905
0x3, 0xf, false);
906
/* HI: Copy shared vgpr (low lanes' bpermuted data) to output vgpr */
907
bld.vop1_dpp(aco_opcode::v_mov_b32, dst, Operand(shared_vgpr_lo, v1), dpp_quad_perm(0, 1, 2, 3),
908
0xc, 0xf, false);
909
910
/* Restore saved EXEC */
911
bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(tmp_exec.physReg(), s2));
912
913
/* RA assumes that the result is always in the low part of the register, so we have to
* shift if it's not there already */
915
if (input_data.physReg().byte()) {
916
unsigned right_shift = input_data.physReg().byte() * 8;
917
bld.vop2(aco_opcode::v_lshrrev_b32, dst, Operand::c32(right_shift),
918
Operand(dst.physReg(), v1));
919
}
920
}
921
922
void
923
emit_gfx6_bpermute(Program* program, aco_ptr<Instruction>& instr, Builder& bld)
924
{
925
/* Emulates bpermute using readlane instructions */
926
927
Operand index = instr->operands[0];
928
Operand input = instr->operands[1];
929
Definition dst = instr->definitions[0];
930
Definition temp_exec = instr->definitions[1];
931
Definition clobber_vcc = instr->definitions[2];
932
933
assert(dst.regClass() == v1);
934
assert(temp_exec.regClass() == bld.lm);
935
assert(clobber_vcc.regClass() == bld.lm);
936
assert(clobber_vcc.physReg() == vcc);
937
assert(index.regClass() == v1);
938
assert(index.physReg() != dst.physReg());
939
assert(input.regClass().type() == RegType::vgpr);
940
assert(input.bytes() <= 4);
941
assert(input.physReg() != dst.physReg());
942
943
/* Save original EXEC */
944
bld.sop1(aco_opcode::s_mov_b64, temp_exec, Operand(exec, s2));
945
946
/* An "unrolled loop" that is executed per each lane.
947
* This takes only a few instructions per lane, as opposed to a "real" loop
948
* with branching, where the branch instruction alone would take 16+ cycles.
949
*/
950
for (unsigned n = 0; n < program->wave_size; ++n) {
951
/* Activate the lane which has N for its source index */
952
bld.vopc(aco_opcode::v_cmpx_eq_u32, Definition(exec, bld.lm), clobber_vcc, Operand::c32(n),
953
index);
954
/* Read the data from lane N */
955
bld.readlane(Definition(vcc, s1), input, Operand::c32(n));
956
/* On the active lane, move the data we read from lane N to the destination VGPR */
957
bld.vop1(aco_opcode::v_mov_b32, dst, Operand(vcc, s1));
958
/* Restore original EXEC */
959
bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(temp_exec.physReg(), s2));
960
}
961
}
962
963
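/* One pending parallelcopy. 'uses' counts, for each byte of the definition, how
* many queued copies still read that byte; do_copy() only overwrites bytes whose
* count is zero. */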
struct copy_operation {
964
Operand op;
965
Definition def;
966
unsigned bytes;
967
union {
968
uint8_t uses[8];
969
uint64_t is_used = 0;
970
};
971
};
972
973
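/* Carves the largest sub-copy out of 'src' starting at 'offset': the size is the
* biggest power of two that keeps both registers suitably aligned, stays inside
* the copy and (unless ignore_uses) does not mix used and unused bytes. */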
void
974
split_copy(lower_context* ctx, unsigned offset, Definition* def, Operand* op,
975
const copy_operation& src, bool ignore_uses, unsigned max_size)
976
{
977
PhysReg def_reg = src.def.physReg();
978
PhysReg op_reg = src.op.physReg();
979
def_reg.reg_b += offset;
980
op_reg.reg_b += offset;
981
982
/* 64-bit VGPR copies (implemented with v_lshrrev_b64) are slow before GFX10 */
983
if (ctx->program->chip_class < GFX10 && src.def.regClass().type() == RegType::vgpr)
984
max_size = MIN2(max_size, 4);
985
unsigned max_align = src.def.regClass().type() == RegType::vgpr ? 4 : 16;
986
987
/* make sure the size is a power of two and reg % bytes == 0 */
988
unsigned bytes = 1;
989
for (; bytes <= max_size; bytes *= 2) {
990
unsigned next = bytes * 2u;
991
bool can_increase = def_reg.reg_b % MIN2(next, max_align) == 0 &&
992
offset + next <= src.bytes && next <= max_size;
993
if (!src.op.isConstant() && can_increase)
994
can_increase = op_reg.reg_b % MIN2(next, max_align) == 0;
995
for (unsigned i = 0; !ignore_uses && can_increase && (i < bytes); i++)
996
can_increase = (src.uses[offset + bytes + i] == 0) == (src.uses[offset] == 0);
997
if (!can_increase)
998
break;
999
}
1000
1001
RegClass def_cls = bytes % 4 == 0 ? RegClass(src.def.regClass().type(), bytes / 4u)
1002
: RegClass(src.def.regClass().type(), bytes).as_subdword();
1003
*def = Definition(src.def.tempId(), def_reg, def_cls);
1004
if (src.op.isConstant()) {
1005
assert(bytes >= 1 && bytes <= 8);
1006
uint64_t val = src.op.constantValue64() >> (offset * 8u);
1007
*op = Operand::get_const(ctx->program->chip_class, val, bytes);
1008
} else {
1009
RegClass op_cls = bytes % 4 == 0 ? RegClass(src.op.regClass().type(), bytes / 4u)
1010
: RegClass(src.op.regClass().type(), bytes).as_subdword();
1011
*op = Operand(op_reg, op_cls);
1012
op->setTemp(Temp(src.op.tempId(), op_cls));
1013
}
1014
}
1015
1016
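/* Returns a bitmask, one bit per unit of range [a_start, a_start + a_size), of
* the units that also lie inside range [b_start, b_start + b_size). */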
uint32_t
1017
get_intersection_mask(int a_start, int a_size, int b_start, int b_size)
1018
{
1019
int intersection_start = MAX2(b_start - a_start, 0);
1020
int intersection_end = MAX2(b_start + b_size - a_start, 0);
1021
if (intersection_start >= a_size || intersection_end == 0)
1022
return 0;
1023
1024
uint32_t mask = u_bit_consecutive(0, a_size);
1025
return u_bit_consecutive(intersection_start, intersection_end - intersection_start) & mask;
1026
}
1027
1028
void
1029
copy_constant(lower_context* ctx, Builder& bld, Definition dst, Operand op)
1030
{
1031
assert(op.bytes() == dst.bytes());
1032
1033
if (dst.bytes() == 4 && op.isLiteral()) {
1034
uint32_t imm = op.constantValue();
1035
if (dst.regClass() == s1 && (imm >= 0xffff8000 || imm <= 0x7fff)) {
1036
bld.sopk(aco_opcode::s_movk_i32, dst, imm & 0xFFFFu);
1037
return;
1038
} else if (util_bitreverse(imm) <= 64 || util_bitreverse(imm) >= 0xFFFFFFF0) {
1039
uint32_t rev = util_bitreverse(imm);
1040
if (dst.regClass() == s1)
1041
bld.sop1(aco_opcode::s_brev_b32, dst, Operand::c32(rev));
1042
else
1043
bld.vop1(aco_opcode::v_bfrev_b32, dst, Operand::c32(rev));
1044
return;
1045
} else if (dst.regClass() == s1 && imm != 0) {
1046
unsigned start = (ffs(imm) - 1) & 0x1f;
1047
unsigned size = util_bitcount(imm) & 0x1f;
1048
if ((((1u << size) - 1u) << start) == imm) {
1049
bld.sop2(aco_opcode::s_bfm_b32, dst, Operand::c32(size), Operand::c32(start));
1050
return;
1051
}
1052
}
1053
}
1054
1055
if (op.bytes() == 4 && op.constantEquals(0x3e22f983) && ctx->program->chip_class >= GFX8)
1056
op.setFixed(PhysReg{248}); /* it can be an inline constant on GFX8+ */
1057
1058
if (dst.regClass() == s1) {
1059
bld.sop1(aco_opcode::s_mov_b32, dst, op);
1060
} else if (dst.regClass() == s2) {
1061
/* s_ashr_i64 writes SCC, so we can't use it */
1062
assert(Operand::is_constant_representable(op.constantValue64(), 8, true, false));
1063
bld.sop1(aco_opcode::s_mov_b64, dst, op);
1064
} else if (dst.regClass() == v2) {
1065
if (Operand::is_constant_representable(op.constantValue64(), 8, true, false)) {
1066
bld.vop3(aco_opcode::v_lshrrev_b64, dst, Operand::zero(), op);
1067
} else {
1068
assert(Operand::is_constant_representable(op.constantValue64(), 8, false, true));
1069
bld.vop3(aco_opcode::v_ashrrev_i64, dst, Operand::zero(), op);
1070
}
1071
} else if (dst.regClass() == v1) {
1072
bld.vop1(aco_opcode::v_mov_b32, dst, op);
1073
} else {
1074
assert(dst.regClass() == v1b || dst.regClass() == v2b);
1075
1076
if (dst.regClass() == v1b && ctx->program->chip_class >= GFX9) {
1077
uint8_t val = op.constantValue();
1078
Operand op32 = Operand::c32((uint32_t)val | (val & 0x80u ? 0xffffff00u : 0u));
1079
if (op32.isLiteral()) {
1080
uint32_t a = (uint32_t)int8_mul_table[val * 2];
1081
uint32_t b = (uint32_t)int8_mul_table[val * 2 + 1];
1082
bld.vop2_sdwa(aco_opcode::v_mul_u32_u24, dst,
1083
Operand::c32(a | (a & 0x80u ? 0xffffff00u : 0x0u)),
1084
Operand::c32(b | (b & 0x80u ? 0xffffff00u : 0x0u)));
1085
} else {
1086
bld.vop1_sdwa(aco_opcode::v_mov_b32, dst, op32);
1087
}
1088
} else if (dst.regClass() == v2b && ctx->program->chip_class >= GFX9 && !op.isLiteral()) {
1089
if (op.constantValue() >= 0xfff0 || op.constantValue() <= 64) {
1090
/* use v_mov_b32 to avoid possible issues with denormal flushing or
1091
* NaN. v_add_f16 is still needed for float constants. */
1092
uint32_t val32 = (int32_t)(int16_t)op.constantValue();
1093
bld.vop1_sdwa(aco_opcode::v_mov_b32, dst, Operand::c32(val32));
1094
} else {
1095
bld.vop2_sdwa(aco_opcode::v_add_f16, dst, op, Operand::zero());
1096
}
1097
} else if (dst.regClass() == v2b && ctx->program->chip_class >= GFX10 &&
1098
(ctx->block->fp_mode.denorm16_64 & fp_denorm_keep_in)) {
1099
if (dst.physReg().byte() == 2) {
1100
Operand def_lo(dst.physReg().advance(-2), v2b);
1101
Instruction* instr = bld.vop3(aco_opcode::v_pack_b32_f16, dst, def_lo, op);
1102
instr->vop3().opsel = 0;
1103
} else {
1104
assert(dst.physReg().byte() == 0);
1105
Operand def_hi(dst.physReg().advance(2), v2b);
1106
Instruction* instr = bld.vop3(aco_opcode::v_pack_b32_f16, dst, op, def_hi);
1107
instr->vop3().opsel = 2;
1108
}
1109
} else {
1110
uint32_t offset = dst.physReg().byte() * 8u;
1111
uint32_t mask = ((1u << (dst.bytes() * 8)) - 1) << offset;
1112
uint32_t val = (op.constantValue() << offset) & mask;
1113
dst = Definition(PhysReg(dst.physReg().reg()), v1);
1114
Operand def_op(dst.physReg(), v1);
1115
if (val != mask)
1116
bld.vop2(aco_opcode::v_and_b32, dst, Operand::c32(~mask), def_op);
1117
if (val != 0)
1118
bld.vop2(aco_opcode::v_or_b32, dst, Operand::c32(val), def_op);
1119
}
1120
}
1121
}
1122
1123
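/* Emits all parts of 'copy' whose destination bytes are no longer read by other
* pending copies. Returns true if anything was emitted. */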
bool
1124
do_copy(lower_context* ctx, Builder& bld, const copy_operation& copy, bool* preserve_scc,
1125
PhysReg scratch_sgpr)
1126
{
1127
bool did_copy = false;
1128
for (unsigned offset = 0; offset < copy.bytes;) {
1129
if (copy.uses[offset]) {
1130
offset++;
1131
continue;
1132
}
1133
1134
Definition def;
1135
Operand op;
1136
split_copy(ctx, offset, &def, &op, copy, false, 8);
1137
1138
if (def.physReg() == scc) {
1139
bld.sopc(aco_opcode::s_cmp_lg_i32, def, op, Operand::zero());
1140
*preserve_scc = true;
1141
} else if (op.isConstant()) {
1142
copy_constant(ctx, bld, def, op);
1143
} else if (def.regClass() == v1) {
1144
bld.vop1(aco_opcode::v_mov_b32, def, op);
1145
} else if (def.regClass() == v2) {
1146
bld.vop3(aco_opcode::v_lshrrev_b64, def, Operand::zero(), op);
1147
} else if (def.regClass() == s1) {
1148
bld.sop1(aco_opcode::s_mov_b32, def, op);
1149
} else if (def.regClass() == s2) {
1150
bld.sop1(aco_opcode::s_mov_b64, def, op);
1151
} else if (def.regClass().is_subdword() && ctx->program->chip_class < GFX8) {
1152
if (op.physReg().byte()) {
1153
assert(def.physReg().byte() == 0);
1154
bld.vop2(aco_opcode::v_lshrrev_b32, def, Operand::c32(op.physReg().byte() * 8), op);
1155
} else if (def.physReg().byte()) {
1156
assert(op.physReg().byte() == 0);
1157
/* preserve the target's lower half */
1158
uint32_t bits = def.physReg().byte() * 8;
1159
PhysReg lo_reg = PhysReg(def.physReg().reg());
1160
Definition lo_half =
1161
Definition(lo_reg, RegClass::get(RegType::vgpr, def.physReg().byte()));
1162
Definition dst =
1163
Definition(lo_reg, RegClass::get(RegType::vgpr, lo_half.bytes() + op.bytes()));
1164
1165
if (def.physReg().reg() == op.physReg().reg()) {
1166
bld.vop2(aco_opcode::v_and_b32, lo_half, Operand::c32((1 << bits) - 1u),
1167
Operand(lo_reg, lo_half.regClass()));
1168
if (def.physReg().byte() == 1) {
1169
bld.vop2(aco_opcode::v_mul_u32_u24, dst, Operand::c32((1 << bits) + 1u), op);
1170
} else if (def.physReg().byte() == 2) {
1171
bld.vop2(aco_opcode::v_cvt_pk_u16_u32, dst, Operand(lo_reg, v2b), op);
1172
} else if (def.physReg().byte() == 3) {
1173
bld.sop1(aco_opcode::s_mov_b32, Definition(scratch_sgpr, s1),
1174
Operand::c32((1 << bits) + 1u));
1175
bld.vop3(aco_opcode::v_mul_lo_u32, dst, Operand(scratch_sgpr, s1), op);
1176
}
1177
} else {
1178
lo_half.setFixed(lo_half.physReg().advance(4 - def.physReg().byte()));
1179
bld.vop2(aco_opcode::v_lshlrev_b32, lo_half, Operand::c32(32 - bits),
1180
Operand(lo_reg, lo_half.regClass()));
1181
bld.vop3(aco_opcode::v_alignbyte_b32, dst, op,
1182
Operand(lo_half.physReg(), lo_half.regClass()),
1183
Operand::c32(4 - def.physReg().byte()));
1184
}
1185
} else {
1186
bld.vop1(aco_opcode::v_mov_b32, def, op);
1187
}
1188
} else if (def.regClass().is_subdword()) {
1189
bld.vop1_sdwa(aco_opcode::v_mov_b32, def, op);
1190
} else {
1191
unreachable("unsupported copy");
1192
}
1193
1194
did_copy = true;
1195
offset += def.bytes();
1196
}
1197
return did_copy;
1198
}
1199
1200
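/* Swaps the contents of the copy's operand and definition registers, using
* v_swap_b32 on GFX9+ and xor-swap sequences otherwise. On GFX8+, bytes that
* were swapped along but should not have been are copied back afterwards. */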
void
1201
do_swap(lower_context* ctx, Builder& bld, const copy_operation& copy, bool preserve_scc,
1202
Pseudo_instruction* pi)
1203
{
1204
unsigned offset = 0;
1205
1206
if (copy.bytes == 3 && (copy.def.physReg().reg_b % 4 <= 1) &&
1207
(copy.def.physReg().reg_b % 4) == (copy.op.physReg().reg_b % 4)) {
1208
/* instead of doing a 2-byte and 1-byte swap, do a 4-byte swap and then fix up with a 1-byte
* swap */
1210
PhysReg op = copy.op.physReg();
1211
PhysReg def = copy.def.physReg();
1212
op.reg_b &= ~0x3;
1213
def.reg_b &= ~0x3;
1214
1215
copy_operation tmp;
1216
tmp.op = Operand(op, v1);
1217
tmp.def = Definition(def, v1);
1218
tmp.bytes = 4;
1219
memset(tmp.uses, 1, 4);
1220
do_swap(ctx, bld, tmp, preserve_scc, pi);
1221
1222
op.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
1223
def.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
1224
tmp.op = Operand(op, v1b);
1225
tmp.def = Definition(def, v1b);
1226
tmp.bytes = 1;
1227
tmp.uses[0] = 1;
1228
do_swap(ctx, bld, tmp, preserve_scc, pi);
1229
1230
offset = copy.bytes;
1231
}
1232
1233
for (; offset < copy.bytes;) {
1234
Definition def;
1235
Operand op;
1236
unsigned max_size = copy.def.regClass().type() == RegType::vgpr ? 4 : 8;
1237
split_copy(ctx, offset, &def, &op, copy, true, max_size);
1238
1239
assert(op.regClass() == def.regClass());
1240
Operand def_as_op = Operand(def.physReg(), def.regClass());
1241
Definition op_as_def = Definition(op.physReg(), op.regClass());
1242
if (ctx->program->chip_class >= GFX9 && def.regClass() == v1) {
1243
bld.vop1(aco_opcode::v_swap_b32, def, op_as_def, op, def_as_op);
1244
} else if (def.regClass() == v1) {
1245
assert(def.physReg().byte() == 0 && op.physReg().byte() == 0);
1246
bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1247
bld.vop2(aco_opcode::v_xor_b32, def, op, def_as_op);
1248
bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1249
} else if (op.physReg() == scc || def.physReg() == scc) {
1250
/* we need to swap scc and another sgpr */
1251
assert(!preserve_scc);
1252
1253
PhysReg other = op.physReg() == scc ? def.physReg() : op.physReg();
1254
1255
bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
1256
bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(other, s1),
1257
Operand::zero());
1258
bld.sop1(aco_opcode::s_mov_b32, Definition(other, s1), Operand(pi->scratch_sgpr, s1));
1259
} else if (def.regClass() == s1) {
1260
if (preserve_scc) {
1261
bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), op);
1262
bld.sop1(aco_opcode::s_mov_b32, op_as_def, def_as_op);
1263
bld.sop1(aco_opcode::s_mov_b32, def, Operand(pi->scratch_sgpr, s1));
1264
} else {
1265
bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
1266
bld.sop2(aco_opcode::s_xor_b32, def, Definition(scc, s1), op, def_as_op);
1267
bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
1268
}
1269
} else if (def.regClass() == s2) {
1270
if (preserve_scc)
1271
bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
1272
bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
1273
bld.sop2(aco_opcode::s_xor_b64, def, Definition(scc, s1), op, def_as_op);
1274
bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
1275
if (preserve_scc)
1276
bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(pi->scratch_sgpr, s1),
1277
Operand::zero());
1278
} else if (def.bytes() == 2 && def.physReg().reg() == op.physReg().reg()) {
1279
bld.vop3(aco_opcode::v_alignbyte_b32, Definition(def.physReg(), v1), def_as_op, op,
1280
Operand::c32(2u));
1281
} else {
1282
assert(def.regClass().is_subdword());
1283
bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1284
bld.vop2_sdwa(aco_opcode::v_xor_b32, def, op, def_as_op);
1285
bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1286
}
1287
1288
offset += def.bytes();
1289
}
1290
1291
if (ctx->program->chip_class <= GFX7)
1292
return;
1293
1294
/* fixup in case we swapped bytes we shouldn't have */
1295
copy_operation tmp_copy = copy;
1296
tmp_copy.op.setFixed(copy.def.physReg());
1297
tmp_copy.def.setFixed(copy.op.physReg());
1298
do_copy(ctx, bld, tmp_copy, &preserve_scc, pi->scratch_sgpr);
1299
}
1300
1301
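/* Packs two 16-bit sources into one 32-bit register, using v_pack_b32_f16 or
* v_alignbyte_b32 where possible and falling back to shift/and/or sequences. */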
void
1302
do_pack_2x16(lower_context* ctx, Builder& bld, Definition def, Operand lo, Operand hi)
1303
{
1304
if (lo.isConstant() && hi.isConstant()) {
1305
copy_constant(ctx, bld, def, Operand::c32(lo.constantValue() | (hi.constantValue() << 16)));
1306
return;
1307
}
1308
1309
bool can_use_pack = (ctx->block->fp_mode.denorm16_64 & fp_denorm_keep_in) &&
1310
(ctx->program->chip_class >= GFX10 ||
1311
(ctx->program->chip_class >= GFX9 && !lo.isLiteral() && !hi.isLiteral()));
1312
1313
if (can_use_pack) {
1314
Instruction* instr = bld.vop3(aco_opcode::v_pack_b32_f16, def, lo, hi);
1315
/* opsel: 0 = select low half, 1 = select high half. [0] = src0, [1] = src1 */
1316
instr->vop3().opsel = hi.physReg().byte() | (lo.physReg().byte() >> 1);
1317
return;
1318
}
1319
1320
/* a single alignbyte can be sufficient: hi can be a 32-bit integer constant */
1321
if (lo.physReg().byte() == 2 && hi.physReg().byte() == 0 &&
1322
(!hi.isConstant() || !Operand::c32(hi.constantValue()).isLiteral() ||
1323
ctx->program->chip_class >= GFX10)) {
1324
bld.vop3(aco_opcode::v_alignbyte_b32, def, hi, lo, Operand::c32(2u));
1325
return;
1326
}
1327
1328
Definition def_lo = Definition(def.physReg(), v2b);
1329
Definition def_hi = Definition(def.physReg().advance(2), v2b);
1330
1331
if (lo.isConstant()) {
1332
/* move hi and zero low bits */
1333
if (hi.physReg().byte() == 0)
1334
bld.vop2(aco_opcode::v_lshlrev_b32, def_hi, Operand::c32(16u), hi);
1335
else
1336
bld.vop2(aco_opcode::v_and_b32, def_hi, Operand::c32(~0xFFFFu), hi);
1337
bld.vop2(aco_opcode::v_or_b32, def, Operand::c32(lo.constantValue()),
1338
Operand(def.physReg(), v1));
1339
return;
1340
}
1341
if (hi.isConstant()) {
1342
/* move lo and zero high bits */
1343
if (lo.physReg().byte() == 2)
1344
bld.vop2(aco_opcode::v_lshrrev_b32, def_lo, Operand::c32(16u), lo);
1345
else
1346
bld.vop2(aco_opcode::v_and_b32, def_lo, Operand::c32(0xFFFFu), lo);
1347
bld.vop2(aco_opcode::v_or_b32, def, Operand::c32(hi.constantValue() << 16u),
1348
Operand(def.physReg(), v1));
1349
return;
1350
}
1351
1352
if (lo.physReg().reg() == def.physReg().reg()) {
1353
/* lo is in the high bits of def */
1354
assert(lo.physReg().byte() == 2);
1355
bld.vop2(aco_opcode::v_lshrrev_b32, def_lo, Operand::c32(16u), lo);
1356
lo.setFixed(def.physReg());
1357
} else if (hi.physReg() == def.physReg()) {
1358
/* hi is in the low bits of def */
1359
assert(hi.physReg().byte() == 0);
1360
bld.vop2(aco_opcode::v_lshlrev_b32, def_hi, Operand::c32(16u), hi);
1361
hi.setFixed(def.physReg().advance(2));
1362
} else if (ctx->program->chip_class >= GFX8) {
1363
/* either lo or hi can be placed with just a v_mov */
1364
assert(lo.physReg().byte() == 0 || hi.physReg().byte() == 2);
1365
Operand& op = lo.physReg().byte() == 0 ? lo : hi;
1366
PhysReg reg = def.physReg().advance(op.physReg().byte());
1367
bld.vop1(aco_opcode::v_mov_b32, Definition(reg, v2b), op);
1368
op.setFixed(reg);
1369
}
1370
1371
if (ctx->program->chip_class >= GFX8) {
1372
/* either hi or lo is already placed correctly */
1373
if (lo.physReg().reg() == def.physReg().reg())
1374
bld.vop1_sdwa(aco_opcode::v_mov_b32, def_hi, hi);
1375
else
1376
bld.vop1_sdwa(aco_opcode::v_mov_b32, def_lo, lo);
1377
return;
1378
}
1379
1380
/* alignbyte needs the operands in the following way:
1381
* | xx hi | lo xx | >> 2 byte */
1382
if (lo.physReg().byte() != hi.physReg().byte()) {
1383
/* | xx lo | hi xx | => | lo hi | lo hi | */
1384
assert(lo.physReg().byte() == 0 && hi.physReg().byte() == 2);
1385
bld.vop3(aco_opcode::v_alignbyte_b32, def, lo, hi, Operand::c32(2u));
1386
lo = Operand(def_hi.physReg(), v2b);
1387
hi = Operand(def_lo.physReg(), v2b);
1388
} else if (lo.physReg().byte() == 0) {
1389
/* | xx hi | xx lo | => | xx hi | lo 00 | */
1390
bld.vop2(aco_opcode::v_lshlrev_b32, def_hi, Operand::c32(16u), lo);
1391
lo = Operand(def_hi.physReg(), v2b);
1392
} else {
1393
/* | hi xx | lo xx | => | 00 hi | lo xx | */
1394
assert(hi.physReg().byte() == 2);
1395
bld.vop2(aco_opcode::v_lshrrev_b32, def_lo, Operand::c32(16u), hi);
1396
hi = Operand(def_lo.physReg(), v2b);
1397
}
1398
/* perform the alignbyte */
1399
bld.vop3(aco_opcode::v_alignbyte_b32, def, hi, lo, Operand::c32(2u));
1400
}
1401
1402
void
try_coalesce_copies(lower_context* ctx, std::map<PhysReg, copy_operation>& copy_map,
copy_operation& copy)
{
// TODO try more relaxed alignment for subdword copies
unsigned next_def_align = util_next_power_of_two(copy.bytes + 1);
unsigned next_op_align = next_def_align;
if (copy.def.regClass().type() == RegType::vgpr)
next_def_align = MIN2(next_def_align, 4);
if (copy.op.regClass().type() == RegType::vgpr)
next_op_align = MIN2(next_op_align, 4);

if (copy.bytes >= 8 || copy.def.physReg().reg_b % next_def_align ||
(!copy.op.isConstant() && copy.op.physReg().reg_b % next_op_align))
return;

auto other = copy_map.find(copy.def.physReg().advance(copy.bytes));
if (other == copy_map.end() || copy.bytes + other->second.bytes > 8 ||
copy.op.isConstant() != other->second.op.isConstant())
return;

/* don't create 64-bit copies before GFX10 */
if (copy.bytes >= 4 && copy.def.regClass().type() == RegType::vgpr &&
ctx->program->chip_class < GFX10)
return;

unsigned new_size = copy.bytes + other->second.bytes;
if (copy.op.isConstant()) {
uint64_t val =
copy.op.constantValue64() | (other->second.op.constantValue64() << (copy.bytes * 8u));
if (!Operand::is_constant_representable(val, copy.bytes + other->second.bytes, true,
copy.def.regClass().type() == RegType::vgpr))
return;
copy.op = Operand::get_const(ctx->program->chip_class, val, new_size);
} else {
if (other->second.op.physReg() != copy.op.physReg().advance(copy.bytes))
return;
copy.op = Operand(copy.op.physReg(), RegClass::get(copy.op.regClass().type(), new_size));
}

copy.bytes = new_size;
copy.def = Definition(copy.def.physReg(), RegClass::get(copy.def.regClass().type(), copy.bytes));
copy_map.erase(other);
}
1446
1447
void
1448
handle_operands(std::map<PhysReg, copy_operation>& copy_map, lower_context* ctx,
1449
chip_class chip_class, Pseudo_instruction* pi)
1450
{
1451
Builder bld(ctx->program, &ctx->instructions);
1452
unsigned num_instructions_before = ctx->instructions.size();
1453
aco_ptr<Instruction> mov;
1454
bool writes_scc = false;
1455
1456
/* count the number of uses for each dst reg */
1457
for (auto it = copy_map.begin(); it != copy_map.end();) {
1458
1459
if (it->second.def.physReg() == scc)
1460
writes_scc = true;
1461
1462
assert(!pi->tmp_in_scc || !(it->second.def.physReg() == pi->scratch_sgpr));
1463
1464
/* if src and dst reg are the same, remove operation */
1465
if (it->first == it->second.op.physReg()) {
1466
it = copy_map.erase(it);
1467
continue;
1468
}
1469
1470
/* split large copies */
1471
if (it->second.bytes > 8) {
1472
assert(!it->second.op.isConstant());
1473
assert(!it->second.def.regClass().is_subdword());
1474
RegClass rc = RegClass(it->second.def.regClass().type(), it->second.def.size() - 2);
1475
Definition hi_def = Definition(PhysReg{it->first + 2}, rc);
1476
rc = RegClass(it->second.op.regClass().type(), it->second.op.size() - 2);
1477
Operand hi_op = Operand(PhysReg{it->second.op.physReg() + 2}, rc);
1478
copy_operation copy = {hi_op, hi_def, it->second.bytes - 8};
1479
copy_map[hi_def.physReg()] = copy;
1480
assert(it->second.op.physReg().byte() == 0 && it->second.def.physReg().byte() == 0);
1481
it->second.op = Operand(it->second.op.physReg(),
1482
it->second.op.regClass().type() == RegType::sgpr ? s2 : v2);
1483
it->second.def = Definition(it->second.def.physReg(),
1484
it->second.def.regClass().type() == RegType::sgpr ? s2 : v2);
1485
it->second.bytes = 8;
1486
}
1487
1488
try_coalesce_copies(ctx, copy_map, it->second);
1489
1490
/* check if the definition reg is used by another copy operation */
1491
for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
1492
if (copy.second.op.isConstant())
1493
continue;
1494
for (uint16_t i = 0; i < it->second.bytes; i++) {
1495
/* distance might underflow */
1496
unsigned distance = it->first.reg_b + i - copy.second.op.physReg().reg_b;
1497
if (distance < copy.second.bytes)
1498
it->second.uses[i] += 1;
1499
}
1500
}
1501
1502
++it;
1503
}
1504
1505
/* first, handle paths in the location transfer graph */
1506
bool preserve_scc = pi->tmp_in_scc && !writes_scc;
1507
bool skip_partial_copies = true;
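/* while skip_partial_copies is set, copies whose target is only partially free are
 * deferred so that coalescing and 2x16 packing below get a chance first */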
1508
for (auto it = copy_map.begin();;) {
1509
if (copy_map.empty()) {
1510
ctx->program->statistics[statistic_copies] +=
1511
ctx->instructions.size() - num_instructions_before;
1512
return;
1513
}
1514
if (it == copy_map.end()) {
1515
if (!skip_partial_copies)
1516
break;
1517
skip_partial_copies = false;
1518
it = copy_map.begin();
1519
}
1520
1521
/* check if we can pack a full register at once */
1522
if (it->first.byte() == 0 && it->second.bytes == 2) {
1523
PhysReg reg_hi = it->first.advance(2);
1524
std::map<PhysReg, copy_operation>::iterator other = copy_map.find(reg_hi);
1525
if (other != copy_map.end() && other->second.bytes == 2) {
1526
/* check if the target register is otherwise unused */
1527
bool unused_lo = !it->second.is_used || (it->second.is_used == 0x0101 &&
1528
other->second.op.physReg() == it->first);
1529
bool unused_hi = !other->second.is_used ||
1530
(other->second.is_used == 0x0101 && it->second.op.physReg() == reg_hi);
1531
if (unused_lo && unused_hi) {
1532
Operand lo = it->second.op;
1533
Operand hi = other->second.op;
1534
do_pack_2x16(ctx, bld, Definition(it->first, v1), lo, hi);
1535
copy_map.erase(it);
1536
copy_map.erase(other);
1537
1538
for (std::pair<const PhysReg, copy_operation>& other2 : copy_map) {
1539
for (uint16_t i = 0; i < other2.second.bytes; i++) {
1540
/* distance might underflow */
1541
unsigned distance_lo = other2.first.reg_b + i - lo.physReg().reg_b;
1542
unsigned distance_hi = other2.first.reg_b + i - hi.physReg().reg_b;
1543
if (distance_lo < 2 || distance_hi < 2)
1544
other2.second.uses[i] -= 1;
1545
}
1546
}
1547
it = copy_map.begin();
1548
continue;
1549
}
1550
}
1551
}
1552
1553
/* on GFX6/7, we need some small workarounds as there is no
1554
* SDWA instruction to do partial register writes */
1555
if (ctx->program->chip_class < GFX8 && it->second.bytes < 4) {
1556
if (it->first.byte() == 0 && it->second.op.physReg().byte() == 0 && !it->second.is_used &&
1557
pi->opcode == aco_opcode::p_split_vector) {
1558
/* Other operations might overwrite the high bits, so change all users
1559
* of the high bits to the new target where they are still available.
1560
* This mechanism depends on also emitting dead definitions. */
1561
PhysReg reg_hi = it->second.op.physReg().advance(it->second.bytes);
1562
while (reg_hi != PhysReg(it->second.op.physReg().reg() + 1)) {
1563
std::map<PhysReg, copy_operation>::iterator other = copy_map.begin();
1564
for (other = copy_map.begin(); other != copy_map.end(); other++) {
1565
/* on GFX6/7, if the high bits are used as operand, they cannot be a target */
1566
if (other->second.op.physReg() == reg_hi) {
1567
other->second.op.setFixed(it->first.advance(reg_hi.byte()));
1568
break; /* break because an operand can only be used once */
1569
}
1570
}
1571
reg_hi = reg_hi.advance(it->second.bytes);
1572
}
1573
} else if (it->first.byte()) {
1574
assert(pi->opcode == aco_opcode::p_create_vector);
1575
/* on GFX6/7, if we target an upper half where the lower half hasn't yet been handled,
* move to the target operand's high bits. This is safe to do as it cannot be an operand
*/
1578
PhysReg lo = PhysReg(it->first.reg());
1579
std::map<PhysReg, copy_operation>::iterator other = copy_map.find(lo);
1580
if (other != copy_map.end()) {
1581
assert(other->second.bytes == it->first.byte());
1582
PhysReg new_reg_hi = other->second.op.physReg().advance(it->first.byte());
1583
it->second.def = Definition(new_reg_hi, it->second.def.regClass());
1584
it->second.is_used = 0;
1585
other->second.bytes += it->second.bytes;
1586
other->second.def.setTemp(Temp(other->second.def.tempId(),
1587
RegClass::get(RegType::vgpr, other->second.bytes)));
1588
other->second.op.setTemp(Temp(other->second.op.tempId(),
1589
RegClass::get(RegType::vgpr, other->second.bytes)));
1590
/* if the new target's high bits are also a target, change uses */
1591
std::map<PhysReg, copy_operation>::iterator target = copy_map.find(new_reg_hi);
1592
if (target != copy_map.end()) {
1593
for (unsigned i = 0; i < it->second.bytes; i++)
1594
target->second.uses[i]++;
1595
}
1596
}
1597
}
1598
}
1599
1600
/* find portions where the target reg is not used as operand for any other copy */
1601
if (it->second.is_used) {
1602
if (it->second.op.isConstant() || skip_partial_copies) {
1603
/* we have to skip constants until is_used=0.
1604
* we also skip partial copies at the beginning to help coalescing */
1605
++it;
1606
continue;
1607
}
1608
1609
unsigned has_zero_use_bytes = 0;
1610
for (unsigned i = 0; i < it->second.bytes; i++)
1611
has_zero_use_bytes |= (it->second.uses[i] == 0) << i;
1612
1613
if (has_zero_use_bytes) {
1614
/* Skipping partial copying and doing a v_swap_b32 and then fixup
1615
* copies is usually beneficial for sub-dword copies, but if doing
1616
* a partial copy allows further copies, it should be done instead. */
1617
bool partial_copy = (has_zero_use_bytes == 0xf) || (has_zero_use_bytes == 0xf0);
1618
for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
1619
/* on GFX6/7, we can only do copies with full registers */
1620
if (partial_copy || ctx->program->chip_class <= GFX7)
1621
break;
1622
for (uint16_t i = 0; i < copy.second.bytes; i++) {
1623
/* distance might underflow */
1624
unsigned distance = copy.first.reg_b + i - it->second.op.physReg().reg_b;
1625
if (distance < it->second.bytes && copy.second.uses[i] == 1 &&
1626
!it->second.uses[distance])
1627
partial_copy = true;
1628
}
1629
}
1630
1631
if (!partial_copy) {
1632
++it;
1633
continue;
1634
}
1635
} else {
1636
/* full target reg is used: register swapping needed */
1637
++it;
1638
continue;
1639
}
1640
}
1641
1642
bool did_copy = do_copy(ctx, bld, it->second, &preserve_scc, pi->scratch_sgpr);
1643
skip_partial_copies = did_copy;
1644
std::pair<PhysReg, copy_operation> copy = *it;
1645
1646
if (it->second.is_used == 0) {
1647
/* the target reg is not used as operand for any other copy, so we
1648
* copied to all of it */
1649
copy_map.erase(it);
1650
it = copy_map.begin();
1651
} else {
1652
/* we only performed some portions of this copy, so split it to only
1653
* leave the portions that still need to be done */
1654
copy_operation original = it->second; /* the map insertion below can overwrite this */
1655
copy_map.erase(it);
1656
for (unsigned offset = 0; offset < original.bytes;) {
1657
if (original.uses[offset] == 0) {
1658
offset++;
1659
continue;
1660
}
1661
Definition def;
1662
Operand op;
1663
split_copy(ctx, offset, &def, &op, original, false, 8);
1664
1665
copy_operation new_copy = {op, def, def.bytes()};
1666
for (unsigned i = 0; i < new_copy.bytes; i++)
1667
new_copy.uses[i] = original.uses[i + offset];
1668
copy_map[def.physReg()] = new_copy;
1669
1670
offset += def.bytes();
1671
}
1672
1673
it = copy_map.begin();
1674
}
1675
1676
/* Reduce the number of uses of the operand reg by one. Do this after
* splitting the copy or removing it in case the copy writes to its own
* operand (for example, v[7:8] = v[8:9]) */
1679
if (did_copy && !copy.second.op.isConstant()) {
1680
for (std::pair<const PhysReg, copy_operation>& other : copy_map) {
1681
for (uint16_t i = 0; i < other.second.bytes; i++) {
1682
/* distance might underflow */
1683
unsigned distance = other.first.reg_b + i - copy.second.op.physReg().reg_b;
1684
if (distance < copy.second.bytes && !copy.second.uses[distance])
1685
other.second.uses[i] -= 1;
1686
}
1687
}
1688
}
1689
}
1690
1691
/* all target regs are needed as operand somewhere, which means all entries are part of a cycle */
1692
unsigned largest = 0;
1693
for (const std::pair<const PhysReg, copy_operation>& op : copy_map)
1694
largest = MAX2(largest, op.second.bytes);
1695
1696
while (!copy_map.empty()) {
1697
1698
/* Perform larger swaps first, because larger swaps can make other
* swaps unnecessary. */
1700
auto it = copy_map.begin();
1701
for (auto it2 = copy_map.begin(); it2 != copy_map.end(); ++it2) {
1702
if (it2->second.bytes > it->second.bytes) {
1703
it = it2;
1704
if (it->second.bytes == largest)
1705
break;
1706
}
1707
}
1708
1709
/* should already be done */
1710
assert(!it->second.op.isConstant());
1711
1712
assert(it->second.op.isFixed());
1713
assert(it->second.def.regClass() == it->second.op.regClass());
1714
1715
if (it->first == it->second.op.physReg()) {
1716
copy_map.erase(it);
1717
continue;
1718
}
1719
1720
if (preserve_scc && it->second.def.getTemp().type() == RegType::sgpr)
1721
assert(!(it->second.def.physReg() == pi->scratch_sgpr));
1722
1723
/* to resolve the cycle, we have to swap the src reg with the dst reg */
1724
copy_operation swap = it->second;
1725
1726
/* if this is self-intersecting, we have to split it because
1727
* self-intersecting swaps don't make sense */
1728
PhysReg src = swap.op.physReg(), dst = swap.def.physReg();
1729
if (abs((int)src.reg_b - (int)dst.reg_b) < (int)swap.bytes) {
1730
unsigned offset = abs((int)src.reg_b - (int)dst.reg_b);
1731
RegType type = swap.def.regClass().type();
1732
1733
copy_operation remaining;
1734
src.reg_b += offset;
1735
dst.reg_b += offset;
1736
remaining.bytes = swap.bytes - offset;
1737
memcpy(remaining.uses, swap.uses + offset, remaining.bytes);
1738
remaining.op = Operand(src, RegClass::get(type, remaining.bytes));
1739
remaining.def = Definition(dst, RegClass::get(type, remaining.bytes));
1740
copy_map[dst] = remaining;
1741
1742
memset(swap.uses + offset, 0, swap.bytes - offset);
1743
swap.bytes = offset;
1744
}
1745
1746
/* GFX6-7 can only swap full registers */
1747
if (ctx->program->chip_class <= GFX7)
1748
swap.bytes = align(swap.bytes, 4);
1749
1750
do_swap(ctx, bld, swap, preserve_scc, pi);
1751
1752
/* remove from map */
1753
copy_map.erase(it);
1754
1755
/* change the operand reg of the target's uses and split uses if needed */
1756
uint32_t bytes_left = u_bit_consecutive(0, swap.bytes);
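/* one bit per byte of the swap's definition that no reading copy has claimed yet;
 * lets the loop below stop early once every byte is accounted for */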
1757
for (auto target = copy_map.begin(); target != copy_map.end(); ++target) {
1758
if (target->second.op.physReg() == swap.def.physReg() &&
1759
swap.bytes == target->second.bytes) {
1760
target->second.op.setFixed(swap.op.physReg());
1761
break;
1762
}
1763
1764
uint32_t imask =
1765
get_intersection_mask(swap.def.physReg().reg_b, swap.bytes,
1766
target->second.op.physReg().reg_b, target->second.bytes);
1767
1768
if (!imask)
1769
continue;
1770
1771
int offset = (int)target->second.op.physReg().reg_b - (int)swap.def.physReg().reg_b;
1772
1773
/* split and update the middle (the portion that reads the swap's
1774
* definition) to read the swap's operand instead */
1775
int target_op_end = target->second.op.physReg().reg_b + target->second.bytes;
1776
int swap_def_end = swap.def.physReg().reg_b + swap.bytes;
1777
int before_bytes = MAX2(-offset, 0);
1778
int after_bytes = MAX2(target_op_end - swap_def_end, 0);
1779
int middle_bytes = target->second.bytes - before_bytes - after_bytes;
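/* e.g. (hypothetical values): a 4-byte target operand at reg_b 10..13 overlapping a
 * 2-byte swap definition at reg_b 12..13 yields before_bytes=2, middle_bytes=2 and
 * after_bytes=0; only the middle part is redirected to the swap's operand */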
1780
1781
if (after_bytes) {
1782
unsigned after_offset = before_bytes + middle_bytes;
1783
assert(after_offset > 0);
1784
copy_operation copy;
1785
copy.bytes = after_bytes;
1786
memcpy(copy.uses, target->second.uses + after_offset, copy.bytes);
1787
RegClass rc = RegClass::get(target->second.op.regClass().type(), after_bytes);
1788
copy.op = Operand(target->second.op.physReg().advance(after_offset), rc);
1789
copy.def = Definition(target->second.def.physReg().advance(after_offset), rc);
1790
copy_map[copy.def.physReg()] = copy;
1791
}
1792
1793
if (middle_bytes) {
1794
copy_operation copy;
1795
copy.bytes = middle_bytes;
1796
memcpy(copy.uses, target->second.uses + before_bytes, copy.bytes);
1797
RegClass rc = RegClass::get(target->second.op.regClass().type(), middle_bytes);
1798
copy.op = Operand(swap.op.physReg().advance(MAX2(offset, 0)), rc);
1799
copy.def = Definition(target->second.def.physReg().advance(before_bytes), rc);
1800
copy_map[copy.def.physReg()] = copy;
1801
}
1802
1803
if (before_bytes) {
1804
copy_operation copy;
1805
target->second.bytes = before_bytes;
1806
RegClass rc = RegClass::get(target->second.op.regClass().type(), before_bytes);
1807
target->second.op = Operand(target->second.op.physReg(), rc);
1808
target->second.def = Definition(target->second.def.physReg(), rc);
1809
memset(target->second.uses + target->second.bytes, 0, 8 - target->second.bytes);
1810
}
1811
1812
/* break early since we know each byte of the swap's definition is used
1813
* at most once */
1814
bytes_left &= ~imask;
1815
if (!bytes_left)
1816
break;
1817
}
1818
}
1819
ctx->program->statistics[statistic_copies] += ctx->instructions.size() - num_instructions_before;
1820
}
1821
1822
void
emit_set_mode(Builder& bld, float_mode new_mode, bool set_round, bool set_denorm)
{
if (bld.program->chip_class >= GFX10) {
if (set_round)
bld.sopp(aco_opcode::s_round_mode, -1, new_mode.round);
if (set_denorm)
bld.sopp(aco_opcode::s_denorm_mode, -1, new_mode.denorm);
} else if (set_round || set_denorm) {
/* "((size - 1) << 11) | register" (MODE is encoded as register 1) */
Instruction* instr =
bld.sopk(aco_opcode::s_setreg_imm32_b32, Operand::c8(new_mode.val), (7 << 11) | 1).instr;
/* has to be a literal */
instr->operands[0].setFixed(PhysReg{255});
}
}

void
emit_set_mode_from_block(Builder& bld, Program& program, Block* block, bool always_set)
{
float_mode config_mode;
config_mode.val = program.config->float_mode;

bool set_round = always_set && block->fp_mode.round != config_mode.round;
bool set_denorm = always_set && block->fp_mode.denorm != config_mode.denorm;
if (block->kind & block_kind_top_level) {
for (unsigned pred : block->linear_preds) {
if (program.blocks[pred].fp_mode.round != block->fp_mode.round)
set_round = true;
if (program.blocks[pred].fp_mode.denorm != block->fp_mode.denorm)
set_denorm = true;
}
}
/* only allow changing modes at top-level blocks so this doesn't break
* the "jump over empty blocks" optimization */
assert((!set_round && !set_denorm) || (block->kind & block_kind_top_level));
emit_set_mode(bld, block->fp_mode, set_round, set_denorm);
}
1860
1861
void
1862
lower_to_hw_instr(Program* program)
1863
{
1864
Block* discard_block = NULL;
1865
1866
for (int block_idx = program->blocks.size() - 1; block_idx >= 0; block_idx--) {
1867
Block* block = &program->blocks[block_idx];
1868
lower_context ctx;
1869
ctx.program = program;
1870
ctx.block = block;
1871
Builder bld(program, &ctx.instructions);
1872
1873
emit_set_mode_from_block(bld, *program, block, (block_idx == 0));
1874
1875
for (size_t instr_idx = 0; instr_idx < block->instructions.size(); instr_idx++) {
1876
aco_ptr<Instruction>& instr = block->instructions[instr_idx];
1877
aco_ptr<Instruction> mov;
1878
if (instr->isPseudo() && instr->opcode != aco_opcode::p_unit_test) {
1879
Pseudo_instruction* pi = &instr->pseudo();
1880
1881
switch (instr->opcode) {
1882
case aco_opcode::p_extract_vector: {
1883
PhysReg reg = instr->operands[0].physReg();
1884
Definition& def = instr->definitions[0];
1885
reg.reg_b += instr->operands[1].constantValue() * def.bytes();
1886
1887
if (reg == def.physReg())
1888
break;
1889
1890
RegClass op_rc = def.regClass().is_subdword()
1891
? def.regClass()
1892
: RegClass(instr->operands[0].getTemp().type(), def.size());
1893
std::map<PhysReg, copy_operation> copy_operations;
1894
copy_operations[def.physReg()] = {Operand(reg, op_rc), def, def.bytes()};
1895
handle_operands(copy_operations, &ctx, program->chip_class, pi);
1896
break;
1897
}
1898
case aco_opcode::p_create_vector: {
1899
std::map<PhysReg, copy_operation> copy_operations;
1900
PhysReg reg = instr->definitions[0].physReg();
1901
1902
for (const Operand& op : instr->operands) {
1903
if (op.isConstant()) {
1904
const Definition def = Definition(
1905
reg, RegClass(instr->definitions[0].getTemp().type(), op.size()));
1906
copy_operations[reg] = {op, def, op.bytes()};
1907
reg.reg_b += op.bytes();
1908
continue;
1909
}
1910
if (op.isUndefined()) {
1911
// TODO: coalesce subdword copies if dst byte is 0
1912
reg.reg_b += op.bytes();
1913
continue;
1914
}
1915
1916
RegClass rc_def =
1917
op.regClass().is_subdword()
1918
? op.regClass()
1919
: RegClass(instr->definitions[0].getTemp().type(), op.size());
1920
const Definition def = Definition(reg, rc_def);
1921
copy_operations[def.physReg()] = {op, def, op.bytes()};
1922
reg.reg_b += op.bytes();
1923
}
1924
handle_operands(copy_operations, &ctx, program->chip_class, pi);
1925
break;
1926
}
1927
case aco_opcode::p_split_vector: {
1928
std::map<PhysReg, copy_operation> copy_operations;
1929
PhysReg reg = instr->operands[0].physReg();
1930
1931
for (const Definition& def : instr->definitions) {
1932
RegClass rc_op = def.regClass().is_subdword()
1933
? def.regClass()
1934
: RegClass(instr->operands[0].getTemp().type(), def.size());
1935
const Operand op = Operand(reg, rc_op);
1936
copy_operations[def.physReg()] = {op, def, def.bytes()};
1937
reg.reg_b += def.bytes();
1938
}
1939
handle_operands(copy_operations, &ctx, program->chip_class, pi);
1940
break;
1941
}
1942
case aco_opcode::p_parallelcopy:
1943
case aco_opcode::p_wqm: {
1944
std::map<PhysReg, copy_operation> copy_operations;
1945
for (unsigned j = 0; j < instr->operands.size(); j++) {
1946
assert(instr->definitions[j].bytes() == instr->operands[j].bytes());
1947
copy_operations[instr->definitions[j].physReg()] = {
1948
instr->operands[j], instr->definitions[j], instr->operands[j].bytes()};
1949
}
1950
handle_operands(copy_operations, &ctx, program->chip_class, pi);
1951
break;
1952
}
1953
case aco_opcode::p_exit_early_if: {
1954
/* don't bother with an early exit near the end of the program */
1955
if ((block->instructions.size() - 1 - instr_idx) <= 4 &&
1956
block->instructions.back()->opcode == aco_opcode::s_endpgm) {
1957
unsigned null_exp_dest =
1958
(ctx.program->stage.hw == HWStage::FS) ? 9 /* NULL */ : V_008DFC_SQ_EXP_POS;
1959
bool ignore_early_exit = true;
1960
1961
for (unsigned k = instr_idx + 1; k < block->instructions.size(); ++k) {
1962
const aco_ptr<Instruction>& instr2 = block->instructions[k];
1963
if (instr2->opcode == aco_opcode::s_endpgm ||
1964
instr2->opcode == aco_opcode::p_logical_end)
1965
continue;
1966
else if (instr2->opcode == aco_opcode::exp &&
1967
instr2->exp().dest == null_exp_dest)
1968
continue;
1969
else if (instr2->opcode == aco_opcode::p_parallelcopy &&
1970
instr2->definitions[0].isFixed() &&
1971
instr2->definitions[0].physReg() == exec)
1972
continue;
1973
1974
ignore_early_exit = false;
1975
}
1976
1977
if (ignore_early_exit)
1978
break;
1979
}
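/* lazily create a single shared discard block: it exports null and ends the
 * program, and every early exit branches to it */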
1980
1981
if (!discard_block) {
1982
discard_block = program->create_and_insert_block();
1983
block = &program->blocks[block_idx];
1984
1985
bld.reset(discard_block);
1986
bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1), 0,
1987
V_008DFC_SQ_EXP_NULL, false, true, true);
1988
bld.sopp(aco_opcode::s_endpgm);
1989
1990
bld.reset(&ctx.instructions);
1991
}
1992
1993
// TODO: exec can be zero here with block_kind_discard
1994
1995
assert(instr->operands[0].physReg() == scc);
1996
bld.sopp(aco_opcode::s_cbranch_scc0, Definition(exec, s2), instr->operands[0],
1997
discard_block->index);
1998
1999
discard_block->linear_preds.push_back(block->index);
2000
block->linear_succs.push_back(discard_block->index);
2001
break;
2002
}
2003
case aco_opcode::p_spill: {
2004
assert(instr->operands[0].regClass() == v1.as_linear());
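/* spill: write each dword of the operand (SGPR or constant) into the lane of the
 * linear VGPR selected by the spill slot offset */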
2005
for (unsigned i = 0; i < instr->operands[2].size(); i++) {
2006
Operand src =
2007
instr->operands[2].isConstant()
2008
? Operand::c32(uint32_t(instr->operands[2].constantValue64() >> (32 * i)))
2009
: Operand(PhysReg{instr->operands[2].physReg() + i}, s1);
2010
bld.writelane(bld.def(v1, instr->operands[0].physReg()), src,
2011
Operand::c32(instr->operands[1].constantValue() + i),
2012
instr->operands[0]);
2013
}
2014
break;
2015
}
2016
case aco_opcode::p_reload: {
2017
assert(instr->operands[0].regClass() == v1.as_linear());
2018
for (unsigned i = 0; i < instr->definitions[0].size(); i++)
2019
bld.readlane(bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
2020
instr->operands[0],
2021
Operand::c32(instr->operands[1].constantValue() + i));
2022
break;
2023
}
2024
case aco_opcode::p_as_uniform: {
2025
if (instr->operands[0].isConstant() ||
2026
instr->operands[0].regClass().type() == RegType::sgpr) {
2027
std::map<PhysReg, copy_operation> copy_operations;
2028
copy_operations[instr->definitions[0].physReg()] = {
2029
instr->operands[0], instr->definitions[0], instr->definitions[0].bytes()};
2030
handle_operands(copy_operations, &ctx, program->chip_class, pi);
2031
} else {
2032
assert(instr->operands[0].regClass().type() == RegType::vgpr);
2033
assert(instr->definitions[0].regClass().type() == RegType::sgpr);
2034
assert(instr->operands[0].size() == instr->definitions[0].size());
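/* VGPR source: read the first active lane of every dword into the SGPR destination */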
2035
for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
2036
bld.vop1(aco_opcode::v_readfirstlane_b32,
2037
bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
2038
Operand(PhysReg{instr->operands[0].physReg() + i}, v1));
2039
}
2040
}
2041
break;
2042
}
2043
case aco_opcode::p_bpermute: {
2044
if (ctx.program->chip_class <= GFX7)
2045
emit_gfx6_bpermute(program, instr, bld);
2046
else if (ctx.program->chip_class >= GFX10 && ctx.program->wave_size == 64)
2047
emit_gfx10_wave64_bpermute(program, instr, bld);
2048
else
2049
unreachable("Current hardware supports ds_bpermute, don't emit p_bpermute.");
2050
break;
2051
}
2052
case aco_opcode::p_constaddr: {
2053
unsigned id = instr->definitions[0].tempId();
2054
PhysReg reg = instr->definitions[0].physReg();
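/* materialize a 64-bit PC-relative constant address: fetch the PC, add the low
 * 32 bits of the (later resolved) constant offset, then carry into the high half */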
2055
bld.sop1(aco_opcode::p_constaddr_getpc, instr->definitions[0], Operand::c32(id));
2056
bld.sop2(aco_opcode::p_constaddr_addlo, Definition(reg, s1), bld.def(s1, scc),
2057
Operand(reg, s1), Operand::c32(id));
2058
bld.sop2(aco_opcode::s_addc_u32, Definition(reg.advance(4), s1), bld.def(s1, scc),
2059
Operand(reg.advance(4), s1), Operand::zero(), Operand(scc, s1));
2060
break;
2061
}
2062
case aco_opcode::p_extract: {
2063
assert(instr->operands[1].isConstant());
2064
assert(instr->operands[2].isConstant());
2065
assert(instr->operands[3].isConstant());
2066
if (instr->definitions[0].regClass() == s1)
2067
assert(instr->definitions.size() >= 2 && instr->definitions[1].physReg() == scc);
2068
Definition dst = instr->definitions[0];
2069
Operand op = instr->operands[0];
2070
unsigned bits = instr->operands[2].constantValue();
2071
unsigned index = instr->operands[1].constantValue();
2072
unsigned offset = index * bits;
2073
bool signext = !instr->operands[3].constantEquals(0);
2074
2075
if (dst.regClass() == s1) {
2076
if (offset == (32 - bits)) {
2077
bld.sop2(signext ? aco_opcode::s_ashr_i32 : aco_opcode::s_lshr_b32, dst,
2078
bld.def(s1, scc), op, Operand::c32(offset));
2079
} else if (offset == 0 && signext && (bits == 8 || bits == 16)) {
2080
bld.sop1(bits == 8 ? aco_opcode::s_sext_i32_i8 : aco_opcode::s_sext_i32_i16,
2081
dst, op);
2082
} else {
2083
bld.sop2(signext ? aco_opcode::s_bfe_i32 : aco_opcode::s_bfe_u32, dst,
2084
bld.def(s1, scc), op, Operand::c32((bits << 16) | offset));
2085
}
2086
} else if (dst.regClass() == v1 || ctx.program->chip_class <= GFX7) {
2087
assert(op.physReg().byte() == 0 && dst.physReg().byte() == 0);
2088
if (offset == (32 - bits) && op.regClass() != s1) {
2089
bld.vop2(signext ? aco_opcode::v_ashrrev_i32 : aco_opcode::v_lshrrev_b32, dst,
2090
Operand::c32(offset), op);
2091
} else {
2092
bld.vop3(signext ? aco_opcode::v_bfe_i32 : aco_opcode::v_bfe_u32, dst, op,
2093
Operand::c32(offset), Operand::c32(bits));
2094
}
2095
} else if (dst.regClass() == v2b) {
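/* 16-bit destination: an SDWA byte select reads just the requested byte of the
 * source dword (sign-extended if requested) and writes it as a word */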
2096
aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(
2097
aco_opcode::v_mov_b32,
2098
(Format)((uint16_t)Format::VOP1 | (uint16_t)Format::SDWA), 1, 1)};
2099
sdwa->operands[0] = Operand(op.physReg().advance(-op.physReg().byte()),
2100
RegClass::get(op.regClass().type(), 4));
2101
sdwa->definitions[0] = dst;
2102
sdwa->sel[0] = sdwa_ubyte0 + op.physReg().byte() + index;
2103
if (signext)
2104
sdwa->sel[0] |= sdwa_sext;
2105
sdwa->dst_sel = sdwa_uword;
2106
bld.insert(std::move(sdwa));
2107
}
2108
break;
2109
}
2110
case aco_opcode::p_insert: {
2111
assert(instr->operands[1].isConstant());
2112
assert(instr->operands[2].isConstant());
2113
if (instr->definitions[0].regClass() == s1)
2114
assert(instr->definitions.size() >= 2 && instr->definitions[1].physReg() == scc);
2115
Definition dst = instr->definitions[0];
2116
Operand op = instr->operands[0];
2117
unsigned bits = instr->operands[2].constantValue();
2118
unsigned index = instr->operands[1].constantValue();
2119
unsigned offset = index * bits;
2120
2121
if (dst.regClass() == s1) {
2122
if (offset == (32 - bits)) {
2123
bld.sop2(aco_opcode::s_lshl_b32, dst, bld.def(s1, scc), op,
2124
Operand::c32(offset));
2125
} else if (offset == 0) {
2126
bld.sop2(aco_opcode::s_bfe_u32, dst, bld.def(s1, scc), op,
2127
Operand::c32(bits << 16));
2128
} else {
2129
bld.sop2(aco_opcode::s_bfe_u32, dst, bld.def(s1, scc), op,
2130
Operand::c32(bits << 16));
2131
bld.sop2(aco_opcode::s_lshl_b32, dst, bld.def(s1, scc),
2132
Operand(dst.physReg(), s1), Operand::c32(offset));
2133
}
2134
} else if (dst.regClass() == v1 || ctx.program->chip_class <= GFX7) {
2135
if (offset == (dst.bytes() * 8u - bits)) {
2136
bld.vop2(aco_opcode::v_lshlrev_b32, dst, Operand::c32(offset), op);
2137
} else if (offset == 0) {
2138
bld.vop3(aco_opcode::v_bfe_u32, dst, op, Operand::zero(), Operand::c32(bits));
2139
} else if (program->chip_class >= GFX9 ||
2140
(op.regClass() != s1 && program->chip_class >= GFX8)) {
2141
aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(
2142
aco_opcode::v_mov_b32,
2143
(Format)((uint16_t)Format::VOP1 | (uint16_t)Format::SDWA), 1, 1)};
2144
sdwa->operands[0] = op;
2145
sdwa->definitions[0] = dst;
2146
sdwa->sel[0] = sdwa_udword;
2147
sdwa->dst_sel = (bits == 8 ? sdwa_ubyte0 : sdwa_uword0) + (offset / bits);
2148
bld.insert(std::move(sdwa));
2149
} else {
2150
bld.vop3(aco_opcode::v_bfe_u32, dst, op, Operand::zero(), Operand::c32(bits));
2151
bld.vop2(aco_opcode::v_lshlrev_b32, dst, Operand::c32(offset),
2152
Operand(dst.physReg(), v1));
2153
}
2154
} else {
2155
assert(dst.regClass() == v2b);
2156
aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(
2157
aco_opcode::v_mov_b32,
2158
(Format)((uint16_t)Format::VOP1 | (uint16_t)Format::SDWA), 1, 1)};
2159
sdwa->operands[0] = op;
2160
sdwa->definitions[0] =
2161
Definition(dst.physReg().advance(-dst.physReg().byte()), v1);
2162
sdwa->sel[0] = sdwa_uword;
2163
sdwa->dst_sel = sdwa_ubyte0 + dst.physReg().byte() + index;
2164
sdwa->dst_preserve = 1;
2165
bld.insert(std::move(sdwa));
2166
}
2167
break;
2168
}
2169
default: break;
2170
}
2171
} else if (instr->isBranch()) {
2172
Pseudo_branch_instruction* branch = &instr->branch();
2173
uint32_t target = branch->target[0];
2174
2175
/* check if all blocks from current to target are empty */
2176
/* In case there are <= 4 SALU or <= 2 VALU instructions, remove the branch */
2177
bool can_remove = block->index < target;
2178
unsigned num_scalar = 0;
2179
unsigned num_vector = 0;
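/* rough cost estimate of the code being jumped over: a VALU instruction counts
 * double a SALU one; more than four SALU-equivalents keeps the branch */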
2180
for (unsigned i = block->index + 1; can_remove && i < branch->target[0]; i++) {
2181
/* uniform branches must not be ignored if they
2182
* are about to jump over actual instructions */
2183
if (!program->blocks[i].instructions.empty() &&
2184
(branch->opcode != aco_opcode::p_cbranch_z ||
2185
branch->operands[0].physReg() != exec)) {
2186
can_remove = false;
2187
break;
2188
}
2189
2190
for (aco_ptr<Instruction>& inst : program->blocks[i].instructions) {
2191
if (inst->isSOPP()) {
2192
can_remove = false;
2193
} else if (inst->isSALU()) {
2194
num_scalar++;
2195
} else if (inst->isVALU()) {
2196
num_vector++;
2197
} else {
2198
can_remove = false;
2199
}
2200
2201
if (num_scalar + num_vector * 2 > 4)
2202
can_remove = false;
2203
2204
if (!can_remove)
2205
break;
2206
}
2207
}
2208
2209
if (can_remove)
2210
continue;
2211
2212
switch (instr->opcode) {
2213
case aco_opcode::p_branch:
2214
assert(block->linear_succs[0] == target);
2215
bld.sopp(aco_opcode::s_branch, branch->definitions[0], target);
2216
break;
2217
case aco_opcode::p_cbranch_nz:
2218
assert(block->linear_succs[1] == target);
2219
if (branch->operands[0].physReg() == exec)
2220
bld.sopp(aco_opcode::s_cbranch_execnz, branch->definitions[0], target);
2221
else if (branch->operands[0].physReg() == vcc)
2222
bld.sopp(aco_opcode::s_cbranch_vccnz, branch->definitions[0], target);
2223
else {
2224
assert(branch->operands[0].physReg() == scc);
2225
bld.sopp(aco_opcode::s_cbranch_scc1, branch->definitions[0], target);
2226
}
2227
break;
2228
case aco_opcode::p_cbranch_z:
2229
assert(block->linear_succs[1] == target);
2230
if (branch->operands[0].physReg() == exec)
2231
bld.sopp(aco_opcode::s_cbranch_execz, branch->definitions[0], target);
2232
else if (branch->operands[0].physReg() == vcc)
2233
bld.sopp(aco_opcode::s_cbranch_vccz, branch->definitions[0], target);
2234
else {
2235
assert(branch->operands[0].physReg() == scc);
2236
bld.sopp(aco_opcode::s_cbranch_scc0, branch->definitions[0], target);
2237
}
2238
break;
2239
default: unreachable("Unknown Pseudo branch instruction!");
2240
}
2241
2242
} else if (instr->isReduction()) {
2243
Pseudo_reduction_instruction& reduce = instr->reduction();
2244
emit_reduction(&ctx, reduce.opcode, reduce.reduce_op, reduce.cluster_size,
2245
reduce.operands[1].physReg(), // tmp
2246
reduce.definitions[1].physReg(), // stmp
2247
reduce.operands[2].physReg(), // vtmp
2248
reduce.definitions[2].physReg(), // sitmp
2249
reduce.operands[0], reduce.definitions[0]);
2250
} else if (instr->isBarrier()) {
2251
Pseudo_barrier_instruction& barrier = instr->barrier();
2252
2253
/* Anything larger than a workgroup isn't possible. Anything
2254
* smaller requires no instructions and this pseudo instruction
2255
* would only be included to control optimizations. */
2256
bool emit_s_barrier = barrier.exec_scope == scope_workgroup &&
2257
program->workgroup_size > program->wave_size;
2258
2259
bld.insert(std::move(instr));
2260
if (emit_s_barrier)
2261
bld.sopp(aco_opcode::s_barrier);
2262
} else if (instr->opcode == aco_opcode::p_cvt_f16_f32_rtne) {
2263
float_mode new_mode = block->fp_mode;
2264
new_mode.round16_64 = fp_round_ne;
2265
bool set_round = new_mode.round != block->fp_mode.round;
2266
2267
emit_set_mode(bld, new_mode, set_round, false);
2268
2269
instr->opcode = aco_opcode::v_cvt_f16_f32;
2270
ctx.instructions.emplace_back(std::move(instr));
2271
2272
emit_set_mode(bld, block->fp_mode, set_round, false);
2273
} else {
2274
ctx.instructions.emplace_back(std::move(instr));
2275
}
2276
}
2277
block->instructions.swap(ctx.instructions);
2278
}
2279
}
2280
2281
} // namespace aco