GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/freedreno/ir3/ir3_nir_lower_io_offsets.c
/*
 * Copyright © 2018-2019 Igalia S.L.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/nir/nir_builder.h"
#include "ir3_nir.h"

/**
 * This pass moves certain offset computations for different I/O ops,
 * which are currently implemented in the IR3 backend compiler, into NIR
 * so that NIR gets a chance to optimize them:
 *
 * - Dword-offset for SSBO load, store and atomics: a new, similar intrinsic
 *   is emitted that replaces the original one, adding a new source that
 *   holds the result of the original byte-offset source divided by 4.
 */
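
/* To make the transformation concrete, a hypothetical before/after sketch
 * of the NIR (the intrinsic names are real; the SSA names are made up):
 *
 *    before:  vec1 32 ssa_5 = intrinsic load_ssbo (ssa_0, ssa_4) (...)
 *    after:   vec1 32 ssa_6 = ushr ssa_4, 2
 *             vec1 32 ssa_7 = intrinsic load_ssbo_ir3 (ssa_0, ssa_4, ssa_6) (...)
 *
 * i.e. the ir3 variant keeps all of the original sources and appends the
 * dword-offset (byte offset >> 2) as a new, last source.
 */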

/* Returns the ir3-specific intrinsic opcode corresponding to an SSBO
 * instruction that is handled by this pass. It also conveniently returns
 * the offset source index in @offset_src_idx.
 *
 * If @intrinsic is not an SSBO intrinsic, or it is not handled by this
 * pass, -1 is returned.
 */
static int
get_ir3_intrinsic_for_ssbo_intrinsic(unsigned intrinsic,
                                     uint8_t *offset_src_idx)
{
   debug_assert(offset_src_idx);

   *offset_src_idx = 1;

   switch (intrinsic) {
   case nir_intrinsic_store_ssbo:
      *offset_src_idx = 2;
      return nir_intrinsic_store_ssbo_ir3;
   case nir_intrinsic_load_ssbo:
      return nir_intrinsic_load_ssbo_ir3;
   case nir_intrinsic_ssbo_atomic_add:
      return nir_intrinsic_ssbo_atomic_add_ir3;
   case nir_intrinsic_ssbo_atomic_imin:
      return nir_intrinsic_ssbo_atomic_imin_ir3;
   case nir_intrinsic_ssbo_atomic_umin:
      return nir_intrinsic_ssbo_atomic_umin_ir3;
   case nir_intrinsic_ssbo_atomic_imax:
      return nir_intrinsic_ssbo_atomic_imax_ir3;
   case nir_intrinsic_ssbo_atomic_umax:
      return nir_intrinsic_ssbo_atomic_umax_ir3;
   case nir_intrinsic_ssbo_atomic_and:
      return nir_intrinsic_ssbo_atomic_and_ir3;
   case nir_intrinsic_ssbo_atomic_or:
      return nir_intrinsic_ssbo_atomic_or_ir3;
   case nir_intrinsic_ssbo_atomic_xor:
      return nir_intrinsic_ssbo_atomic_xor_ir3;
   case nir_intrinsic_ssbo_atomic_exchange:
      return nir_intrinsic_ssbo_atomic_exchange_ir3;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      return nir_intrinsic_ssbo_atomic_comp_swap_ir3;
   default:
      break;
   }

   return -1;
}
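
/* Helper for ir3_nir_try_propagate_bit_shift below: given an ALU bit-shift
 * instruction and a pending shift to merge into it, computes the adjusted
 * shift amount, using the convention that @direction is +1 for SHL and -1
 * for SHR/USHR. A worked example: merging a pending '>> 2' (shift = -2)
 * into 'x << 4' gives current_shift = 4, new_shift = 2, so the helper
 * emits 'shift_amount - 2' and the caller can rebuild it as 'x << 2'.
 * Returns NULL when the merge is not safe (non-constant shift amount,
 * direction reversal, or overflow).
 */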

static nir_ssa_def *
check_and_propagate_bit_shift32(nir_builder *b, nir_alu_instr *alu_instr,
                                int32_t direction, int32_t shift)
{
   debug_assert(alu_instr->src[1].src.is_ssa);
   nir_ssa_def *shift_ssa = alu_instr->src[1].src.ssa;

   /* Only propagate if the shift is a const value so we can check value range
    * statically.
    */
   nir_const_value *const_val = nir_src_as_const_value(alu_instr->src[1].src);
   if (!const_val)
      return NULL;

   int32_t current_shift = const_val[0].i32 * direction;
   int32_t new_shift = current_shift + shift;

   /* If the merge would reverse the direction, bail out.
    * e.g., 'x << 2' then 'x >> 4' is not 'x >> 2'.
    */
   if (current_shift * new_shift < 0)
      return NULL;

   /* If the propagation would overflow an int32_t, bail out too to be on the
    * safe side.
    */
   if (new_shift < -31 || new_shift > 31)
      return NULL;

   /* Add or subtract shift depending on the final direction (SHR vs. SHL). */
   if (shift * direction < 0)
      shift_ssa = nir_isub(b, shift_ssa, nir_imm_int(b, abs(shift)));
   else
      shift_ssa = nir_iadd(b, shift_ssa, nir_imm_int(b, abs(shift)));

   return shift_ssa;
}
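
/* Tries to fold @shift (negative = right shift, positive = left shift)
 * into the bit-shift ALU instruction that produces @offset. For example,
 * if @offset comes from 'idx << 4' and @shift is -2 (the divide-by-4 this
 * pass needs), the result is a new 'idx << 2' def. Returns NULL if @offset
 * is not produced by a constant bit-shift that can be safely merged.
 */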

nir_ssa_def *
ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_ssa_def *offset,
                                int32_t shift)
{
   nir_instr *offset_instr = offset->parent_instr;
   if (offset_instr->type != nir_instr_type_alu)
      return NULL;

   nir_alu_instr *alu = nir_instr_as_alu(offset_instr);
   nir_ssa_def *shift_ssa;
   nir_ssa_def *new_offset = NULL;

   /* The first src could be something like ssa_18.x, but we only want
    * the single component. Otherwise the ishl/ishr/ushr could turn
    * into a vec4 operation:
    */
   nir_ssa_def *src0 = nir_mov_alu(b, alu->src[0], 1);

   switch (alu->op) {
   case nir_op_ishl:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, 1, shift);
      if (shift_ssa)
         new_offset = nir_ishl(b, src0, shift_ssa);
      break;
   case nir_op_ishr:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
      if (shift_ssa)
         new_offset = nir_ishr(b, src0, shift_ssa);
      break;
   case nir_op_ushr:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
      if (shift_ssa)
         new_offset = nir_ushr(b, src0, shift_ssa);
      break;
   default:
      return NULL;
   }

   return new_offset;
}
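
/* Rewrites a single SSBO intrinsic into its *_ir3 variant, appending the
 * word-offset as a new, last source. A small numeric sketch: for a 32-bit
 * load with byte offset 12, the appended source is 12 >> 2 = 3 (dwords);
 * for a 16-bit load the offset is in halfword units, so byte offset 12
 * becomes 12 >> 1 = 6.
 */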

static bool
lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
                      unsigned ir3_ssbo_opcode, uint8_t offset_src_idx)
{
   unsigned num_srcs = nir_intrinsic_infos[intrinsic->intrinsic].num_srcs;
   int shift = 2;

   bool has_dest = nir_intrinsic_infos[intrinsic->intrinsic].has_dest;
   nir_ssa_def *new_dest = NULL;

   /* for 16-bit ssbo access, offset is in 16-bit words instead of dwords */
   if ((has_dest && intrinsic->dest.ssa.bit_size == 16) ||
       (!has_dest && intrinsic->src[0].ssa->bit_size == 16))
      shift = 1;

   /* Here we create a new intrinsic and copy over all contents from the old
    * one. */

   nir_intrinsic_instr *new_intrinsic;
   nir_src *target_src;

   b->cursor = nir_before_instr(&intrinsic->instr);

   /* 'offset_src_idx' holds the index of the source that represents the offset. */
   new_intrinsic = nir_intrinsic_instr_create(b->shader, ir3_ssbo_opcode);

   debug_assert(intrinsic->src[offset_src_idx].is_ssa);
   nir_ssa_def *offset = intrinsic->src[offset_src_idx].ssa;

   /* Since we don't have value range checking, we first try to propagate
    * the division by 4 ('offset >> 2') into another bit-shift instruction that
    * possibly defines the offset. If that's the case, we emit a similar
    * instruction adjusting (merging) the shift value.
    *
    * Here we use the convention that shifting right is negative while shifting
    * left is positive. So 'x / 4' ~ 'x >> 2' or 'x << -2'.
    */
   nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -shift);

   /* The new source that will hold the dword-offset is always the last
    * one for every intrinsic.
    */
   target_src = &new_intrinsic->src[num_srcs];
   *target_src = nir_src_for_ssa(offset);

   if (has_dest) {
      debug_assert(intrinsic->dest.is_ssa);
      nir_ssa_def *dest = &intrinsic->dest.ssa;
      nir_ssa_dest_init(&new_intrinsic->instr, &new_intrinsic->dest,
                        dest->num_components, dest->bit_size, NULL);
      new_dest = &new_intrinsic->dest.ssa;
   }

   for (unsigned i = 0; i < num_srcs; i++)
      new_intrinsic->src[i] = nir_src_for_ssa(intrinsic->src[i].ssa);

   nir_intrinsic_copy_const_indices(new_intrinsic, intrinsic);

   new_intrinsic->num_components = intrinsic->num_components;

   /* If we managed to propagate the division by 4, just use the new offset
    * register and don't emit the SHR.
    */
   if (new_offset)
      offset = new_offset;
   else
      offset = nir_ushr(b, offset, nir_imm_int(b, shift));

   /* Insert the new intrinsic right before the old one. */
   nir_builder_instr_insert(b, &new_intrinsic->instr);

   /* Replace the last source of the new intrinsic by the result of
    * the offset divided by 4.
    */
   nir_instr_rewrite_src(&new_intrinsic->instr, target_src,
                         nir_src_for_ssa(offset));

   if (has_dest) {
      /* Replace the uses of the original destination by that
       * of the new intrinsic.
       */
      nir_ssa_def_rewrite_uses(&intrinsic->dest.ssa, new_dest);
   }

   /* Finally remove the original intrinsic. */
   nir_instr_remove(&intrinsic->instr);

   return true;
}

static bool
lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx,
                       int gpu_id)
{
   bool progress = false;

   nir_foreach_instr_safe (instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      /* SSBO */
      int ir3_intrinsic;
      uint8_t offset_src_idx;
      ir3_intrinsic =
         get_ir3_intrinsic_for_ssbo_intrinsic(intr->intrinsic, &offset_src_idx);
      if (ir3_intrinsic != -1) {
         progress |= lower_offset_for_ssbo(intr, b, (unsigned)ir3_intrinsic,
                                           offset_src_idx);
      }
   }

   return progress;
}

static bool
lower_io_offsets_func(nir_function_impl *impl, int gpu_id)
{
   void *mem_ctx = ralloc_parent(impl);
   nir_builder b;
   nir_builder_init(&b, impl);

   bool progress = false;
   nir_foreach_block_safe (block, impl) {
      progress |= lower_io_offsets_block(block, &b, mem_ctx, gpu_id);
   }

   if (progress) {
      nir_metadata_preserve(impl,
                            nir_metadata_block_index | nir_metadata_dominance);
   }

   return progress;
}

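/* Pass entry point: returns true if any SSBO intrinsic was rewritten. A
 * hypothetical usage note (not from this file): since the pass reports
 * progress in the usual NIR way, a caller could pair it with e.g.
 * nir_opt_algebraic() to fold the constant shifts this pass emits.
 */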
bool
ir3_nir_lower_io_offsets(nir_shader *shader, int gpu_id)
{
   bool progress = false;

   nir_foreach_function (function, shader) {
      if (function->impl)
         progress |= lower_io_offsets_func(function->impl, gpu_id);
   }

   return progress;
}