GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/compiler/nir/nir_lower_alu_to_scalar.c
/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

struct alu_to_scalar_data {
   nir_instr_filter_cb cb;
   const void *data;
};

/** @file nir_lower_alu_to_scalar.c
 *
 * Replaces nir_alu_instr operations with more than one channel used in the
 * arguments with individual per-channel operations.
 */
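
/* Roughly, in NIR's textual form, a vector instruction such as
 *
 *    vec4 32 ssa_2 = fadd ssa_0, ssa_1
 *
 * is replaced by one scalar fadd per enabled write-mask channel, and the
 * scalar results are regrouped with a vec4 so later uses still see a single
 * SSA value.
 */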

static bool
inst_is_vector_alu(const nir_instr *instr, const void *_state)
{
   if (instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu = nir_instr_as_alu(instr);

   /* There is no ALU instruction which has a scalar destination, scalar
    * src[0], and some other vector source.
    */
   assert(alu->dest.dest.is_ssa);
   assert(alu->src[0].src.is_ssa);
   return alu->dest.dest.ssa.num_components > 1 ||
          nir_op_infos[alu->op].input_sizes[0] > 1;
}

static void
nir_alu_ssa_dest_init(nir_alu_instr *alu, unsigned num_components,
                      unsigned bit_size)
{
   nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components,
                     bit_size, NULL);
   alu->dest.write_mask = (1 << num_components) - 1;
}

static nir_ssa_def *
lower_reduction(nir_alu_instr *alu, nir_op chan_op, nir_op merge_op,
                nir_builder *builder)
{
   unsigned num_components = nir_op_infos[alu->op].input_sizes[0];

   nir_ssa_def *last = NULL;
   for (int i = num_components - 1; i >= 0; i--) {
      nir_alu_instr *chan = nir_alu_instr_create(builder->shader, chan_op);
      nir_alu_ssa_dest_init(chan, 1, alu->dest.dest.ssa.bit_size);
      nir_alu_src_copy(&chan->src[0], &alu->src[0], chan);
      chan->src[0].swizzle[0] = chan->src[0].swizzle[i];
      if (nir_op_infos[chan_op].num_inputs > 1) {
         assert(nir_op_infos[chan_op].num_inputs == 2);
         nir_alu_src_copy(&chan->src[1], &alu->src[1], chan);
         chan->src[1].swizzle[0] = chan->src[1].swizzle[i];
      }
      chan->exact = alu->exact;

      nir_builder_instr_insert(builder, &chan->instr);

      if (i == num_components - 1) {
         last = &chan->dest.dest.ssa;
      } else {
         last = nir_build_alu(builder, merge_op,
                              last, &chan->dest.dest.ssa, NULL, NULL);
      }
   }

   return last;
}
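
/* As an illustration of the reduction above: fdot3 conceptually becomes
 *
 *    fadd(fadd(fmul(a.z, b.z), fmul(a.y, b.y)), fmul(a.x, b.x))
 *
 * i.e. one chan_op (fmul) per source component, folded together with
 * merge_op (fadd).  The loop walks the swizzled components from last to
 * first, so the last channel seeds the chain and earlier channels are
 * merged into it.
 */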

static nir_ssa_def *
lower_alu_instr_scalar(nir_builder *b, nir_instr *instr, void *_data)
{
   struct alu_to_scalar_data *data = _data;
   nir_alu_instr *alu = nir_instr_as_alu(instr);
   unsigned num_src = nir_op_infos[alu->op].num_inputs;
   unsigned i, chan;

   assert(alu->dest.dest.is_ssa);
   assert(alu->dest.write_mask != 0);

   b->cursor = nir_before_instr(&alu->instr);
   b->exact = alu->exact;

   if (data->cb && !data->cb(instr, data->data))
      return NULL;

#define LOWER_REDUCTION(name, chan, merge) \
   case name##2: \
   case name##3: \
   case name##4: \
   case name##8: \
   case name##16: \
      return lower_reduction(alu, chan, merge, b); \

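   /* LOWER_REDUCTION is used in the switch below: for a reduction opcode
    * family such as nir_op_fdot it expands to case labels for the 2-, 3-,
    * 4-, 8- and 16-component variants (fdot2 ... fdot16) and sends them all
    * to lower_reduction() with the given per-channel and merge opcodes.
    */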
   switch (alu->op) {
   case nir_op_vec16:
   case nir_op_vec8:
   case nir_op_vec5:
   case nir_op_vec4:
   case nir_op_vec3:
   case nir_op_vec2:
   case nir_op_cube_face_coord_amd:
   case nir_op_cube_face_index_amd:
      /* We don't need to scalarize these ops, they're the ones generated to
       * group up outputs into a value that can be SSAed.
       */
      return NULL;

   case nir_op_pack_half_2x16: {
      if (!b->shader->options->lower_pack_half_2x16)
         return NULL;

      nir_ssa_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);
      return nir_pack_half_2x16_split(b, nir_channel(b, src_vec2, 0),
                                      nir_channel(b, src_vec2, 1));
   }

   case nir_op_unpack_unorm_4x8:
   case nir_op_unpack_snorm_4x8:
   case nir_op_unpack_unorm_2x16:
   case nir_op_unpack_snorm_2x16:
      /* There is no scalar version of these ops, unless we were to break it
       * down to bitshifts and math (which is definitely not intended).
       */
      return NULL;

   case nir_op_unpack_half_2x16_flush_to_zero:
   case nir_op_unpack_half_2x16: {
      if (!b->shader->options->lower_unpack_half_2x16)
         return NULL;

      nir_ssa_def *packed = nir_ssa_for_alu_src(b, alu, 0);
      if (alu->op == nir_op_unpack_half_2x16_flush_to_zero) {
         return nir_vec2(b,
                         nir_unpack_half_2x16_split_x_flush_to_zero(b,
                                                                    packed),
                         nir_unpack_half_2x16_split_y_flush_to_zero(b,
                                                                    packed));
      } else {
         return nir_vec2(b,
                         nir_unpack_half_2x16_split_x(b, packed),
                         nir_unpack_half_2x16_split_y(b, packed));
      }
   }

   case nir_op_pack_uvec2_to_uint: {
      assert(b->shader->options->lower_pack_snorm_2x16 ||
             b->shader->options->lower_pack_unorm_2x16);

      nir_ssa_def *word = nir_extract_u16(b, nir_ssa_for_alu_src(b, alu, 0),
                                          nir_imm_int(b, 0));
      return nir_ior(b, nir_ishl(b, nir_channel(b, word, 1),
                                 nir_imm_int(b, 16)),
                     nir_channel(b, word, 0));
   }

   case nir_op_pack_uvec4_to_uint: {
      assert(b->shader->options->lower_pack_snorm_4x8 ||
             b->shader->options->lower_pack_unorm_4x8);

      nir_ssa_def *byte = nir_extract_u8(b, nir_ssa_for_alu_src(b, alu, 0),
                                         nir_imm_int(b, 0));
      return nir_ior(b, nir_ior(b, nir_ishl(b, nir_channel(b, byte, 3),
                                            nir_imm_int(b, 24)),
                                 nir_ishl(b, nir_channel(b, byte, 2),
                                          nir_imm_int(b, 16))),
                     nir_ior(b, nir_ishl(b, nir_channel(b, byte, 1),
                                         nir_imm_int(b, 8)),
                             nir_channel(b, byte, 0)));
   }

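   /* fdph is the homogeneous dot product: src0.xyz is dotted with src1.xyz
    * and src1.w is added on top, i.e. src0 behaves as if its w component
    * were 1.0.  The expansion below computes exactly that, channel by
    * channel.
    */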
   case nir_op_fdph: {
      nir_ssa_def *src0_vec = nir_ssa_for_alu_src(b, alu, 0);
      nir_ssa_def *src1_vec = nir_ssa_for_alu_src(b, alu, 1);

      nir_ssa_def *sum[4];
      for (unsigned i = 0; i < 3; i++) {
         sum[i] = nir_fmul(b, nir_channel(b, src0_vec, i),
                           nir_channel(b, src1_vec, i));
      }
      sum[3] = nir_channel(b, src1_vec, 3);

      return nir_fadd(b, nir_fadd(b, sum[0], sum[1]),
                      nir_fadd(b, sum[2], sum[3]));
   }

   case nir_op_pack_64_2x32: {
      if (!b->shader->options->lower_pack_64_2x32)
         return NULL;

      nir_ssa_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);
      return nir_pack_64_2x32_split(b, nir_channel(b, src_vec2, 0),
                                    nir_channel(b, src_vec2, 1));
   }
   case nir_op_pack_64_4x16: {
      if (!b->shader->options->lower_pack_64_4x16)
         return NULL;

      nir_ssa_def *src_vec4 = nir_ssa_for_alu_src(b, alu, 0);
      nir_ssa_def *xy = nir_pack_32_2x16_split(b, nir_channel(b, src_vec4, 0),
                                               nir_channel(b, src_vec4, 1));
      nir_ssa_def *zw = nir_pack_32_2x16_split(b, nir_channel(b, src_vec4, 2),
                                               nir_channel(b, src_vec4, 3));

      return nir_pack_64_2x32_split(b, xy, zw);
   }
   case nir_op_pack_32_2x16: {
      if (!b->shader->options->lower_pack_32_2x16)
         return NULL;

      nir_ssa_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);
      return nir_pack_32_2x16_split(b, nir_channel(b, src_vec2, 0),
                                    nir_channel(b, src_vec2, 1));
   }
   case nir_op_unpack_64_2x32:
   case nir_op_unpack_64_4x16:
   case nir_op_unpack_32_2x16:
   case nir_op_unpack_double_2x32_dxil:
      return NULL;

   LOWER_REDUCTION(nir_op_fdot, nir_op_fmul, nir_op_fadd);
   LOWER_REDUCTION(nir_op_ball_fequal, nir_op_feq, nir_op_iand);
   LOWER_REDUCTION(nir_op_ball_iequal, nir_op_ieq, nir_op_iand);
   LOWER_REDUCTION(nir_op_bany_fnequal, nir_op_fneu, nir_op_ior);
   LOWER_REDUCTION(nir_op_bany_inequal, nir_op_ine, nir_op_ior);
   LOWER_REDUCTION(nir_op_b8all_fequal, nir_op_feq8, nir_op_iand);
   LOWER_REDUCTION(nir_op_b8all_iequal, nir_op_ieq8, nir_op_iand);
   LOWER_REDUCTION(nir_op_b8any_fnequal, nir_op_fneu8, nir_op_ior);
   LOWER_REDUCTION(nir_op_b8any_inequal, nir_op_ine8, nir_op_ior);
   LOWER_REDUCTION(nir_op_b16all_fequal, nir_op_feq16, nir_op_iand);
   LOWER_REDUCTION(nir_op_b16all_iequal, nir_op_ieq16, nir_op_iand);
   LOWER_REDUCTION(nir_op_b16any_fnequal, nir_op_fneu16, nir_op_ior);
   LOWER_REDUCTION(nir_op_b16any_inequal, nir_op_ine16, nir_op_ior);
   LOWER_REDUCTION(nir_op_b32all_fequal, nir_op_feq32, nir_op_iand);
   LOWER_REDUCTION(nir_op_b32all_iequal, nir_op_ieq32, nir_op_iand);
   LOWER_REDUCTION(nir_op_b32any_fnequal, nir_op_fneu32, nir_op_ior);
   LOWER_REDUCTION(nir_op_b32any_inequal, nir_op_ine32, nir_op_ior);
   LOWER_REDUCTION(nir_op_fall_equal, nir_op_seq, nir_op_fmin);
   LOWER_REDUCTION(nir_op_fany_nequal, nir_op_sne, nir_op_fmax);

   default:
      break;
   }

   if (alu->dest.dest.ssa.num_components == 1)
      return NULL;

   unsigned num_components = alu->dest.dest.ssa.num_components;
   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS] = { NULL };

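   /* General case: emit one scalar copy of the ALU op per channel set in the
    * destination write mask, swizzling each source down to the matching
    * component (or component 0 for sources that are already scalar), then
    * regroup the scalar results with a vec instruction.
    */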
   for (chan = 0; chan < NIR_MAX_VEC_COMPONENTS; chan++) {
      if (!(alu->dest.write_mask & (1 << chan)))
         continue;

      nir_alu_instr *lower = nir_alu_instr_create(b->shader, alu->op);
      for (i = 0; i < num_src; i++) {
         /* We only handle same-size-as-dest (input_sizes[] == 0) or scalar
          * args (input_sizes[] == 1).
          */
         assert(nir_op_infos[alu->op].input_sizes[i] < 2);
         unsigned src_chan = (nir_op_infos[alu->op].input_sizes[i] == 1 ?
                              0 : chan);

         nir_alu_src_copy(&lower->src[i], &alu->src[i], lower);
         for (int j = 0; j < NIR_MAX_VEC_COMPONENTS; j++)
            lower->src[i].swizzle[j] = alu->src[i].swizzle[src_chan];
      }

      nir_alu_ssa_dest_init(lower, 1, alu->dest.dest.ssa.bit_size);
      lower->dest.saturate = alu->dest.saturate;
      comps[chan] = &lower->dest.dest.ssa;
      lower->exact = alu->exact;

      nir_builder_instr_insert(b, &lower->instr);
   }

   return nir_vec(b, comps, num_components);
}

bool
nir_lower_alu_to_scalar(nir_shader *shader, nir_instr_filter_cb cb, const void *_data)
{
   struct alu_to_scalar_data data = {
      .cb = cb,
      .data = _data,
   };

   return nir_shader_lower_instructions(shader,
                                        inst_is_vector_alu,
                                        lower_alu_instr_scalar,
                                        &data);
}
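
/* Typical driver usage is as a late lowering pass; for example (illustrative,
 * with no filter callback, so every vector ALU instruction gets split):
 *
 *    bool progress = false;
 *    NIR_PASS(progress, shader, nir_lower_alu_to_scalar, NULL, NULL);
 *
 * Passing a nir_instr_filter_cb instead of NULL lets a backend keep selected
 * vector instructions (ones it can execute natively) by returning false for
 * them.
 */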