GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/panfrost/midgard/mir.c
/*
 * Copyright (C) 2019 Alyssa Rosenzweig <[email protected]>
 * Copyright (C) 2019-2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        mir_foreach_src(ins, i) {
                if (ins->src[i] == old)
                        ins->src[i] = new;
        }
}

void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        if (ins->dest == old)
                ins->dest = new;
}

static void
mir_rewrite_index_src_single_swizzle(midgard_instruction *ins, unsigned old, unsigned new, unsigned *swizzle)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] != old) continue;

                ins->src[i] = new;
                mir_compose_swizzle(ins->swizzle[i], swizzle, ins->swizzle[i]);
        }
}

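/* Illustratively, assuming mir_compose_swizzle computes out[c] =
 * swizzle[ins_swizzle[c]]: if an instruction read old.zyxw ({ 2, 1, 0, 3 })
 * and old is replaced by new read through { 3, 2, 1, 0 }, the composed
 * swizzle is { 1, 2, 3, 0 }, i.e. the instruction now reads new.yzwx. */
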
void
mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single(ins, old, new);
        }
}

void
mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned *swizzle)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single_swizzle(ins, old, new, swizzle);
        }
}

void
mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_dst_single(ins, old, new);
        }

        /* Implicitly written before the shader */
        if (ctx->blend_input == old)
                ctx->blend_input = new;

        if (ctx->blend_src1 == old)
                ctx->blend_src1 = new;
}

void
mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_rewrite_index_src(ctx, old, new);
        mir_rewrite_index_dst(ctx, old, new);
}

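/* Typical use (illustrative): once a pass decides value `old` is equivalent
 * to value `new`, mir_rewrite_index(ctx, old, new) retargets every read and
 * write in the shader, including the implicit blend inputs tracked on the
 * context. */
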
unsigned
mir_use_count(compiler_context *ctx, unsigned value)
{
        unsigned used_count = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (mir_has_arg(ins, value))
                        ++used_count;
        }

        if (ctx->blend_input == value)
                ++used_count;

        if (ctx->blend_src1 == value)
                ++used_count;

        return used_count;
}

/* Checks if a value is used only once (or totally dead), which is an important
 * heuristic to figure out if certain optimizations are Worth It (TM) */

bool
mir_single_use(compiler_context *ctx, unsigned value)
{
        /* We can replicate constants in places so who cares */
        if (value == SSA_FIXED_REGISTER(REGISTER_CONSTANT))
                return true;

        return mir_use_count(ctx, value) <= 1;
}

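/* Illustrative guard for a pass that would duplicate work into each consumer:
 *
 *     if (mir_single_use(ctx, ins->dest))
 *             fuse_into_consumer(ctx, ins); // fuse_into_consumer is hypothetical
 */
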
bool
mir_nontrivial_mod(midgard_instruction *ins, unsigned i, bool check_swizzle)
{
        bool is_int = midgard_is_integer_op(ins->op);

        if (is_int) {
                if (ins->src_shift[i]) return true;
        } else {
                if (ins->src_neg[i]) return true;
                if (ins->src_abs[i]) return true;
        }

        if (ins->dest_type != ins->src_types[i]) return true;

        if (check_swizzle) {
                for (unsigned c = 0; c < 16; ++c) {
                        if (!(ins->mask & (1 << c))) continue;
                        if (ins->swizzle[i][c] != c) return true;
                }
        }

        return false;
}

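/* Concretely (illustrative): a mov with an .abs modifier, a type-converting
 * mov (e.g. f16 -> f32), or, with check_swizzle set, a non-identity swizzle
 * like mov r0, r1.yyyy all count as nontrivial, so passes like copy
 * propagation cannot blindly eliminate them. */
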
bool
mir_nontrivial_outmod(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->op);
        unsigned mod = ins->outmod;

        if (ins->dest_type != ins->src_types[1])
                return true;

        if (is_int)
                return mod != midgard_outmod_keeplo;
        else
                return mod != midgard_outmod_none;
}

/* 128 / sz = exp2(log2(128 / sz))
 *          = exp2(log2(128) - log2(sz))
 *          = exp2(7 - log2(sz))
 *          = 1 << (7 - log2(sz))
 */

static unsigned
mir_components_for_bits(unsigned bits)
{
        return 1 << (7 - util_logbase2(bits));
}

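/* For instance, bits = 32 gives 1 << (7 - 5) = 4 components per 128-bit
 * vector, and bits = 8 gives 1 << (7 - 3) = 16. */
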
unsigned
mir_components_for_type(nir_alu_type T)
{
        unsigned sz = nir_alu_type_get_type_size(T);
        return mir_components_for_bits(sz);
}

uint16_t
mir_from_bytemask(uint16_t bytemask, unsigned bits)
{
        unsigned value = 0;
        unsigned count = bits / 8;

        for (unsigned c = 0, d = 0; c < 16; c += count, ++d) {
                bool a = (bytemask & (1 << c)) != 0;

                /* Every byte of a component must agree with its first byte */
                for (unsigned q = c; q < c + count; ++q)
                        assert(((bytemask & (1 << q)) != 0) == a);

                value |= (a << d);
        }

        return value;
}

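/* Worked example: with bits = 32 (count = 4 bytes per component), the
 * bytemask 0x00F0 has bytes 4-7 set, i.e. exactly component 1, so the
 * resulting component mask is 0b0010. */
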
/* Rounds up a bytemask to fill a given component count: iterate over each
 * component and, if any byte in it is masked on, enable the whole component */

uint16_t
mir_round_bytemask_up(uint16_t mask, unsigned bits)
{
        unsigned bytes = bits / 8;
        unsigned maxmask = mask_of(bytes);
        unsigned channels = mir_components_for_bits(bits);

        for (unsigned c = 0; c < channels; ++c) {
                unsigned submask = maxmask << (c * bytes);

                if (mask & submask)
                        mask |= submask;
        }

        return mask;
}

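/* Worked example: with bits = 32, mask = 0x0100 touches only byte 8, which
 * lies in component 2, so the whole component is enabled and the mask rounds
 * up to 0x0F00. */
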
/* Grabs the per-byte mask of an instruction (as opposed to per-component) */

uint16_t
mir_bytemask(midgard_instruction *ins)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
        return pan_to_bytemask(type_size, ins->mask);
}

void
mir_set_bytemask(midgard_instruction *ins, uint16_t bytemask)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
        ins->mask = mir_from_bytemask(bytemask, type_size);
}

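/* For well-formed masks these round-trip: mir_set_bytemask(ins,
 * mir_bytemask(ins)) leaves ins->mask unchanged, since pan_to_bytemask
 * expands each enabled component to its bytes and mir_from_bytemask
 * collapses them back. */
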
/* Checks if we should use an upper destination override, rather than the
 * lower one in the IR. Returns -1 if the sizes already match (nothing to
 * override), zero if the lower half is written, and the number of components
 * to shift otherwise */

signed
mir_upper_override(midgard_instruction *ins, unsigned inst_size)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);

        /* If the sizes are the same, there's nothing to override */
        if (type_size == inst_size)
                return -1;

        /* There are 16 bytes per vector, so there are (16/bytes)
         * components per vector. So the magic half is half of
         * (16/bytes), which simplifies to 8/bytes = 8 / (bits / 8) = 64 / bits
         */

        unsigned threshold = mir_components_for_bits(type_size) >> 1;

        /* How many components did we shift over? */
        unsigned zeroes = __builtin_ctz(ins->mask);

        /* Did we hit the threshold? */
        return (zeroes >= threshold) ? threshold : 0;
}

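/* Worked example: a 16-bit destination in a 32-bit instruction has
 * threshold = (1 << (7 - 4)) / 2 = 4. A writemask of 0xF0 (the upper four
 * 16-bit lanes) has 4 trailing zeroes, hitting the threshold, so we return
 * 4 and use the upper override. */
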
/* Creates a mask of the components of a node read by an instruction, by
 * analyzing the swizzle with respect to the instruction's mask. E.g.:
 *
 * fadd r0.xz, r1.yyyy, r2.zwyx
 *
 * will return a mask of Z/Y for r2
 */

static uint16_t
mir_bytemask_of_read_components_single(unsigned *swizzle, unsigned inmask, unsigned bits)
{
        unsigned cmask = 0;

        for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
                if (!(inmask & (1 << c))) continue;
                cmask |= (1 << swizzle[c]);
        }

        return pan_to_bytemask(bits, cmask);
}

uint16_t
mir_bytemask_of_read_components_index(midgard_instruction *ins, unsigned i)
{
        /* Conditional branches read one 32-bit component = 4 bytes (TODO: multi branch??) */
        if (ins->compact_branch && ins->branch.conditional && (i == 0))
                return 0xF;

        /* ALU ops act componentwise, so we need to pay attention to their
         * mask. Texture/ldst ops do not, so we don't clamp their source read
         * masks based on the writemask */
        unsigned qmask = ~0;

        /* Handle dot products and things */
        if (ins->type == TAG_ALU_4 && !ins->compact_branch) {
                unsigned props = alu_opcode_props[ins->op].props;

                unsigned channel_override = GET_CHANNEL_COUNT(props);

                if (channel_override)
                        qmask = mask_of(channel_override);
                else
                        qmask = ins->mask;
        }

        return mir_bytemask_of_read_components_single(ins->swizzle[i], qmask,
                        nir_alu_type_get_type_size(ins->src_types[i]));
}

uint16_t
mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node)
{
        uint16_t mask = 0;

        if (node == ~0)
                return 0;

        mir_foreach_src(ins, i) {
                if (ins->src[i] != node) continue;
                mask |= mir_bytemask_of_read_components_index(ins, i);
        }

        return mask;
}

/* Register allocation occurs after instruction scheduling, which is fine
 * until we start needing to spill registers and therefore insert
 * instructions into an already-scheduled program. We don't have to be
 * terribly efficient about this, since spilling is already slow. So,
 * semantically, we just need to insert the instruction into a new bundle
 * before/after the bundle of the instruction in question */

static midgard_bundle
mir_bundle_for_op(compiler_context *ctx, midgard_instruction ins)
{
        midgard_instruction *u = mir_upload_ins(ctx, ins);

        midgard_bundle bundle = {
                .tag = ins.type,
                .instruction_count = 1,
                .instructions = { u },
        };

        if (bundle.tag == TAG_ALU_4) {
                assert(OP_IS_MOVE(u->op));
                u->unit = UNIT_VMUL;

                size_t bytes_emitted = sizeof(uint32_t) + sizeof(midgard_reg_info) + sizeof(midgard_vector_alu);
                bundle.padding = ~(bytes_emitted - 1) & 0xF;
                bundle.control = ins.type | u->unit;
        }

        return bundle;
}

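/* The padding computation rounds the bundle up to a 16-byte boundary:
 * ~(n - 1) & 0xF == (16 - n % 16) % 16. E.g. if bytes_emitted were 10,
 * padding would be ~9 & 0xF = 6, for 16 bytes total. */
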
static unsigned
mir_bundle_idx_for_ins(midgard_instruction *tag, midgard_block *block)
{
        midgard_bundle *bundles =
                (midgard_bundle *) block->bundles.data;

        size_t count = (block->bundles.size / sizeof(midgard_bundle));

        for (unsigned i = 0; i < count; ++i) {
                for (unsigned j = 0; j < bundles[i].instruction_count; ++j) {
                        if (bundles[i].instructions[j] == tag)
                                return i;
                }
        }

        mir_print_instruction(tag);
        unreachable("Instruction not scheduled in block");
}

void
mir_insert_instruction_before_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        unsigned before = mir_bundle_idx_for_ins(tag, block);
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + before + 1, bundles + before, (count - before) * sizeof(midgard_bundle));
        midgard_bundle *before_bundle = bundles + before + 1;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + before, &new, sizeof(new));

        list_addtail(&new.instructions[0]->link, &before_bundle->instructions[0]->link);
        block->quadword_count += midgard_tag_props[new.tag].size;
}

void
mir_insert_instruction_after_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        /* We need to grow the bundles array to add our new bundle */
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        /* Find the bundle that we want to insert after */
        unsigned after = mir_bundle_idx_for_ins(tag, block);

        /* All the bundles after that one, we move ahead by one */
        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + after + 2, bundles + after + 1, (count - after - 1) * sizeof(midgard_bundle));
        midgard_bundle *after_bundle = bundles + after;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + after + 1, &new, sizeof(new));
        list_add(&new.instructions[0]->link, &after_bundle->instructions[after_bundle->instruction_count - 1]->link);
        block->quadword_count += midgard_tag_props[new.tag].size;
}

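/* Both insertion paths grow the bundle array by one, memmove the tail up,
 * and splice the new bundle's instruction into the block's instruction list
 * beside its neighbour so iteration order keeps matching bundle order.
 * E.g. inserting after bundle index 2 of [B0 B1 B2 B3] moves B3 to index 4
 * and writes the new bundle at index 3. */
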
/* Flip the first two arguments of a (binary) op. Currently ALU
 * only, no known uses for ldst/tex */

void
mir_flip(midgard_instruction *ins)
{
        unsigned temp = ins->src[0];
        ins->src[0] = ins->src[1];
        ins->src[1] = temp;

        assert(ins->type == TAG_ALU_4);

        temp = ins->src_types[0];
        ins->src_types[0] = ins->src_types[1];
        ins->src_types[1] = temp;

        temp = ins->src_abs[0];
        ins->src_abs[0] = ins->src_abs[1];
        ins->src_abs[1] = temp;

        temp = ins->src_neg[0];
        ins->src_neg[0] = ins->src_neg[1];
        ins->src_neg[1] = temp;

        temp = ins->src_invert[0];
        ins->src_invert[0] = ins->src_invert[1];
        ins->src_invert[1] = temp;

        unsigned temp_swizzle[16];
        memcpy(temp_swizzle, ins->swizzle[0], sizeof(ins->swizzle[0]));
        memcpy(ins->swizzle[0], ins->swizzle[1], sizeof(ins->swizzle[0]));
        memcpy(ins->swizzle[1], temp_swizzle, sizeof(ins->swizzle[0]));
}

/* Before squashing, calculate ctx->temp_count just by observing the MIR */

void
mir_compute_temp_count(compiler_context *ctx)
{
        if (ctx->temp_count)
                return;

        unsigned max_dest = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (ins->dest < SSA_FIXED_MINIMUM)
                        max_dest = MAX2(max_dest, ins->dest + 1);
        }

        ctx->temp_count = max_dest;
}