GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/compiler/nir/nir_lower_bit_size.c
/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_builder.h"

/**
 * Some ALU operations may not be supported in hardware in specific bit-sizes.
 * This pass allows implementations to selectively lower such operations to
 * a bit-size that is supported natively and then converts the result back to
 * the original bit-size.
 */
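
/*
 * A minimal usage sketch (hypothetical driver code, not part of this file):
 * a backend without native 8- or 16-bit ALU support could return 32 from its
 * callback so those instructions are widened to 32 bits and narrowed back
 * afterwards. The callback name and the shader variable are illustrative
 * assumptions only.
 *
 *    static unsigned
 *    lower_bit_size_cb(const nir_instr *instr, void *data)
 *    {
 *       if (instr->type != nir_instr_type_alu)
 *          return 0;
 *
 *       unsigned bit_size = nir_instr_as_alu(instr)->dest.dest.ssa.bit_size;
 *       return (bit_size == 8 || bit_size == 16) ? 32 : 0;
 *    }
 *
 *    ...
 *    progress |= nir_lower_bit_size(shader, lower_bit_size_cb, NULL);
 */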

static nir_ssa_def *convert_to_bit_size(nir_builder *bld, nir_ssa_def *src,
                                        nir_alu_type type, unsigned bit_size)
{
   /* create b2i32(a) instead of i2i32(b2i8(a))/i2i32(b2i16(a)) */
   nir_alu_instr *alu = nir_src_as_alu_instr(nir_src_for_ssa(src));
   if ((type & (nir_type_uint | nir_type_int)) && bit_size == 32 &&
       alu && (alu->op == nir_op_b2i8 || alu->op == nir_op_b2i16)) {
      nir_alu_instr *instr = nir_alu_instr_create(bld->shader, nir_op_b2i32);
      nir_alu_src_copy(&instr->src[0], &alu->src[0], instr);
      return nir_builder_alu_instr_finish_and_insert(bld, instr);
   }

   return nir_convert_to_bit_size(bld, src, type, bit_size);
}
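
/* Lower a single ALU instruction: widen each source to bit_size, emit the
 * operation at that width, and narrow the result back to the original
 * destination size. For example, a 16-bit iadd lowered to 32 bits becomes
 * roughly i2i16(iadd(i2i32(a), i2i32(b))).
 */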
static void
lower_alu_instr(nir_builder *bld, nir_alu_instr *alu, unsigned bit_size)
{
   const nir_op op = alu->op;
   unsigned dst_bit_size = alu->dest.dest.ssa.bit_size;

   bld->cursor = nir_before_instr(&alu->instr);

   /* Convert each source to the requested bit-size */
   nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS] = { NULL };
   for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {
      nir_ssa_def *src = nir_ssa_for_alu_src(bld, alu, i);

      nir_alu_type type = nir_op_infos[op].input_types[i];
      if (nir_alu_type_get_type_size(type) == 0)
         src = convert_to_bit_size(bld, src, type, bit_size);

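      /* NIR shifts take the shift count modulo the operand's bit size. After
       * widening, that implicit mask changes, so mask the count with
       * dst_bit_size - 1 to keep the narrow-width behavior.
       */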
      if (i == 1 && (op == nir_op_ishl || op == nir_op_ishr || op == nir_op_ushr)) {
         assert(util_is_power_of_two_nonzero(dst_bit_size));
         src = nir_iand(bld, src, nir_imm_int(bld, dst_bit_size - 1));
      }

      srcs[i] = src;
   }

   /* Emit the lowered ALU instruction */
   nir_ssa_def *lowered_dst = NULL;
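   /* imul_high/umul_high have no direct equivalent at the wider size, but
    * since dst_bit_size * 2 <= bit_size the full product fits, so a plain
    * multiply followed by a shift right by dst_bit_size yields the high half.
    */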
   if (op == nir_op_imul_high || op == nir_op_umul_high) {
      assert(dst_bit_size * 2 <= bit_size);
      lowered_dst = nir_imul(bld, srcs[0], srcs[1]);
      if (nir_op_infos[op].output_type & nir_type_uint)
         lowered_dst = nir_ushr_imm(bld, lowered_dst, dst_bit_size);
      else
         lowered_dst = nir_ishr_imm(bld, lowered_dst, dst_bit_size);
   } else {
      lowered_dst = nir_build_alu_src_arr(bld, op, srcs);
   }

   /* Convert result back to the original bit-size */
   if (nir_alu_type_get_type_size(nir_op_infos[op].output_type) == 0 &&
       dst_bit_size != bit_size) {
      nir_alu_type type = nir_op_infos[op].output_type;
      nir_ssa_def *dst = nir_convert_to_bit_size(bld, lowered_dst, type, dst_bit_size);
      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, dst);
   } else {
      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, lowered_dst);
   }
}
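
/* Lower a subgroup intrinsic: clone it with its first source widened to
 * bit_size and, unless it produces a 1-bit Boolean (vote_feq/vote_ieq),
 * narrow the result back to the original bit size.
 */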
static void
lower_intrinsic_instr(nir_builder *b, nir_intrinsic_instr *intrin,
                      unsigned bit_size)
{
   switch (intrin->intrinsic) {
   case nir_intrinsic_read_invocation:
   case nir_intrinsic_read_first_invocation:
   case nir_intrinsic_vote_feq:
   case nir_intrinsic_vote_ieq:
   case nir_intrinsic_shuffle:
   case nir_intrinsic_shuffle_xor:
   case nir_intrinsic_shuffle_up:
   case nir_intrinsic_shuffle_down:
   case nir_intrinsic_quad_broadcast:
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
   case nir_intrinsic_reduce:
   case nir_intrinsic_inclusive_scan:
   case nir_intrinsic_exclusive_scan: {
      assert(intrin->src[0].is_ssa && intrin->dest.is_ssa);
      const unsigned old_bit_size = intrin->dest.ssa.bit_size;
      assert(old_bit_size < bit_size);

      nir_alu_type type = nir_type_uint;
      if (nir_intrinsic_has_reduction_op(intrin))
         type = nir_op_infos[nir_intrinsic_reduction_op(intrin)].input_types[0];
      else if (intrin->intrinsic == nir_intrinsic_vote_feq)
         type = nir_type_float;

      b->cursor = nir_before_instr(&intrin->instr);
      nir_intrinsic_instr *new_intrin =
         nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intrin->instr));

      nir_ssa_def *new_src = nir_convert_to_bit_size(b, intrin->src[0].ssa,
                                                     type, bit_size);
      new_intrin->src[0] = nir_src_for_ssa(new_src);

      if (intrin->intrinsic == nir_intrinsic_vote_feq ||
          intrin->intrinsic == nir_intrinsic_vote_ieq) {
         /* These return a Boolean; it's always 1-bit */
         assert(new_intrin->dest.ssa.bit_size == 1);
      } else {
         /* These return the same bit size as the source; we need to adjust
          * the size and then we'll have to emit a down-cast.
          */
         assert(intrin->src[0].ssa->bit_size == intrin->dest.ssa.bit_size);
         new_intrin->dest.ssa.bit_size = bit_size;
      }

      nir_builder_instr_insert(b, &new_intrin->instr);

      nir_ssa_def *res = &new_intrin->dest.ssa;
      if (intrin->intrinsic == nir_intrinsic_exclusive_scan) {
         /* For exclusive scan, we have to be careful because the identity
          * value for the higher bit size may get added into the mix by
          * disabled channels. For some cases (imin/imax in particular),
          * this value won't convert to the right identity value when we
          * down-cast so we have to clamp it.
          */
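         /* For example, an 8-bit imin widened to 32 bits: a disabled channel
          * contributes INT32_MAX, which truncates to -1 rather than the
          * 8-bit identity 127 on the down-cast, so clamp the result to the
          * narrow type's INT_MAX/INT_MIN first.
          */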
         switch (nir_intrinsic_reduction_op(intrin)) {
         case nir_op_imin: {
            int64_t int_max = (1ull << (old_bit_size - 1)) - 1;
            res = nir_imin(b, res, nir_imm_intN_t(b, int_max, bit_size));
            break;
         }
         case nir_op_imax: {
            int64_t int_min = -(int64_t)(1ull << (old_bit_size - 1));
            res = nir_imax(b, res, nir_imm_intN_t(b, int_min, bit_size));
            break;
         }
         default:
            break;
         }
      }

      if (intrin->intrinsic != nir_intrinsic_vote_feq &&
          intrin->intrinsic != nir_intrinsic_vote_ieq)
         res = nir_u2u(b, res, old_bit_size);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, res);
      break;
   }

   default:
      unreachable("Unsupported instruction");
   }
}
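
/* Widen a phi to bit_size: widen every source at the end of its predecessor
 * block, grow the phi's destination, and emit a narrowing conversion after
 * the block's last phi for the original users.
 */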
static void
lower_phi_instr(nir_builder *b, nir_phi_instr *phi, unsigned bit_size,
                nir_phi_instr *last_phi)
{
   assert(phi->dest.is_ssa);
   unsigned old_bit_size = phi->dest.ssa.bit_size;
   assert(old_bit_size < bit_size);

   nir_foreach_phi_src(src, phi) {
      b->cursor = nir_after_block_before_jump(src->pred);
      assert(src->src.is_ssa);
      nir_ssa_def *new_src = nir_u2u(b, src->src.ssa, bit_size);

      nir_instr_rewrite_src(&phi->instr, &src->src, nir_src_for_ssa(new_src));
   }

   phi->dest.ssa.bit_size = bit_size;

   b->cursor = nir_after_instr(&last_phi->instr);

   nir_ssa_def *new_dest = nir_u2u(b, &phi->dest.ssa, old_bit_size);
   nir_ssa_def_rewrite_uses_after(&phi->dest.ssa, new_dest,
                                  new_dest->parent_instr);
}
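
/* Walk every instruction in the function and ask the callback which ones to
 * lower and to what bit size; a return value of 0 leaves the instruction
 * untouched.
 */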
static bool
lower_impl(nir_function_impl *impl,
           nir_lower_bit_size_callback callback,
           void *callback_data)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      /* Stash this so we can rewrite phi destinations quickly. */
      nir_phi_instr *last_phi = nir_block_last_phi_instr(block);

      nir_foreach_instr_safe(instr, block) {
         unsigned lower_bit_size = callback(instr, callback_data);
         if (lower_bit_size == 0)
            continue;

         switch (instr->type) {
         case nir_instr_type_alu:
            lower_alu_instr(&b, nir_instr_as_alu(instr), lower_bit_size);
            break;

         case nir_instr_type_intrinsic:
            lower_intrinsic_instr(&b, nir_instr_as_intrinsic(instr),
                                  lower_bit_size);
            break;

         case nir_instr_type_phi:
            lower_phi_instr(&b, nir_instr_as_phi(instr),
                            lower_bit_size, last_phi);
            break;

         default:
            unreachable("Unsupported instruction type");
         }
         progress = true;
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

bool
nir_lower_bit_size(nir_shader *shader,
                   nir_lower_bit_size_callback callback,
                   void *callback_data)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_impl(function->impl, callback, callback_data);
   }

   return progress;
}
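
/* Split a 64-bit phi into two 32-bit phis (low and high dwords): unpack each
 * source in its predecessor block, build the two new phis, and repack the
 * result after the phis for the original users.
 */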
static void
split_phi(nir_builder *b, nir_phi_instr *phi)
{
   nir_phi_instr *lowered[2] = {
      nir_phi_instr_create(b->shader),
      nir_phi_instr_create(b->shader)
   };
   int num_components = phi->dest.ssa.num_components;
   assert(phi->dest.ssa.bit_size == 64);

   nir_foreach_phi_src(src, phi) {
      assert(num_components == src->src.ssa->num_components);

      b->cursor = nir_before_src(&src->src, false);

      nir_ssa_def *x = nir_unpack_64_2x32_split_x(b, src->src.ssa);
      nir_ssa_def *y = nir_unpack_64_2x32_split_y(b, src->src.ssa);

      nir_phi_src *xsrc = rzalloc(lowered[0], nir_phi_src);
      xsrc->pred = src->pred;
      xsrc->src = nir_src_for_ssa(x);
      exec_list_push_tail(&lowered[0]->srcs, &xsrc->node);

      nir_phi_src *ysrc = rzalloc(lowered[1], nir_phi_src);
      ysrc->pred = src->pred;
      ysrc->src = nir_src_for_ssa(y);
      exec_list_push_tail(&lowered[1]->srcs, &ysrc->node);
   }

   nir_ssa_dest_init(&lowered[0]->instr, &lowered[0]->dest,
                     num_components, 32, NULL);
   nir_ssa_dest_init(&lowered[1]->instr, &lowered[1]->dest,
                     num_components, 32, NULL);

   b->cursor = nir_before_instr(&phi->instr);
   nir_builder_instr_insert(b, &lowered[0]->instr);
   nir_builder_instr_insert(b, &lowered[1]->instr);

   b->cursor = nir_after_phis(nir_cursor_current_block(b->cursor));
   nir_ssa_def *merged = nir_pack_64_2x32_split(b, &lowered[0]->dest.ssa,
                                                &lowered[1]->dest.ssa);
   nir_ssa_def_rewrite_uses(&phi->dest.ssa, merged);
   nir_instr_remove(&phi->instr);
}

static bool
lower_64bit_phi_impl(nir_function_impl *impl)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
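         /* Phis are always grouped at the top of a block, so stop scanning
          * at the first non-phi instruction.
          */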
         if (instr->type != nir_instr_type_phi)
            break;

         nir_phi_instr *phi = nir_instr_as_phi(instr);
         assert(phi->dest.is_ssa);

         if (phi->dest.ssa.bit_size <= 32)
            continue;

         split_phi(&b, phi);
         progress = true;
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}
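
/* Split every 64-bit phi in the shader into a pair of 32-bit phis, e.g. for
 * backends that handle 64-bit values as pairs of 32-bit registers and cannot
 * represent a 64-bit phi directly.
 */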
bool
nir_lower_64bit_phis(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_64bit_phi_impl(function->impl);
   }

   return progress;
}