GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/panfrost/util/pan_lower_framebuffer.c

/*
 * Copyright (C) 2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors (Collabora):
 *   Alyssa Rosenzweig <[email protected]>
 */

/**
 * Implements framebuffer format conversions in software for Midgard/Bifrost
 * blend shaders. This pass is designed for a single render target; Midgard
 * duplicates blend shaders for MRT to simplify everything. A particular
 * framebuffer format may be categorized as 1) typed load available, 2) typed
 * unpack available, or 3) software unpack only, and likewise for stores. The
 * first two types are handled in the compiler backend directly, so this module
 * is responsible for identifying type 3 formats (hardware dependent) and
 * inserting appropriate ALU code to perform the conversion from the packed
 * type to a designated unpacked type, and vice versa.
 *
 * The unpacked type depends on the format:
 *
 * - For 32-bit float formats, 32-bit floats.
 * - For other floats, 16-bit floats.
 * - For 32-bit ints, 32-bit ints.
 * - For 8-bit ints, 8-bit ints.
 * - For other ints, 16-bit ints.
 *
 * The rationale is to optimize blending and logic op instructions by using the
 * smallest precision necessary to store the pixel losslessly.
 */

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"
#include "util/format/u_format.h"
#include "pan_lower_framebuffer.h"
#include "panfrost-quirks.h"

/* Determines the unpacked type best suiting a given format, so the rest of the
 * pipeline may be adjusted accordingly */

nir_alu_type
pan_unpacked_type_for_format(const struct util_format_description *desc)
{
        int c = util_format_get_first_non_void_channel(desc->format);

        if (c == -1)
                unreachable("Void format not renderable");

        bool large = (desc->channel[c].size > 16);
        bool large_norm = (desc->channel[c].size > 8);
        bool bit8 = (desc->channel[c].size == 8);
        assert(desc->channel[c].size <= 32);

        if (desc->channel[c].normalized)
                return large_norm ? nir_type_float32 : nir_type_float16;

        switch (desc->channel[c].type) {
        case UTIL_FORMAT_TYPE_UNSIGNED:
                return bit8 ? nir_type_uint8 :
                       large ? nir_type_uint32 : nir_type_uint16;
        case UTIL_FORMAT_TYPE_SIGNED:
                return bit8 ? nir_type_int8 :
                       large ? nir_type_int32 : nir_type_int16;
        case UTIL_FORMAT_TYPE_FLOAT:
                return large ? nir_type_float32 : nir_type_float16;
        default:
                unreachable("Format not renderable");
        }
}
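
/* Under this mapping, for example, a pure-integer R8G8B8A8_UINT render target
 * unpacks as nir_type_uint8, R16G16_FLOAT as nir_type_float16, and
 * R32G32B32A32_FLOAT as nir_type_float32. */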

enum pan_format_class
pan_format_class_load(const struct util_format_description *desc, unsigned quirks)
{
        /* Pure integers can be loaded via EXT_framebuffer_fetch and should be
         * handled as a raw load with a size conversion (it's cheap). Likewise,
         * since float framebuffers are internally implemented as raw (i.e.
         * integer) framebuffers with blend shaders to go back and forth, they
         * should be s/w as well */

        if (util_format_is_pure_integer(desc->format) || util_format_is_float(desc->format))
                return PAN_FORMAT_SOFTWARE;

        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_LOADS) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        /* Some formats are missing as typed on some GPUs but have unpacks */
        if (quirks & MIDGARD_MISSING_LOADS) {
                switch (desc->format) {
                case PIPE_FORMAT_R11G11B10_FLOAT:
                case PIPE_FORMAT_R10G10B10A2_UNORM:
                case PIPE_FORMAT_B10G10R10A2_UNORM:
                case PIPE_FORMAT_R10G10B10X2_UNORM:
                case PIPE_FORMAT_B10G10R10X2_UNORM:
                case PIPE_FORMAT_R10G10B10A2_UINT:
                        return PAN_FORMAT_PACK;
                default:
                        return PAN_FORMAT_NATIVE;
                }
        }

        /* Otherwise, we can do native */
        return PAN_FORMAT_NATIVE;
}

enum pan_format_class
pan_format_class_store(const struct util_format_description *desc, unsigned quirks)
{
        /* Check if we can do anything better than software architecturally */
        if (quirks & MIDGARD_NO_TYPED_BLEND_STORES) {
                return (quirks & NO_BLEND_PACKS)
                        ? PAN_FORMAT_SOFTWARE : PAN_FORMAT_PACK;
        }

        return PAN_FORMAT_NATIVE;
}

/* Convenience method */

static enum pan_format_class
pan_format_class(const struct util_format_description *desc, unsigned quirks, bool is_store)
{
        if (is_store)
                return pan_format_class_store(desc, quirks);
        else
                return pan_format_class_load(desc, quirks);
}

/* Software packs/unpacks, by format class. Packs take in the pixel value typed
 * as `pan_unpacked_type_for_format` of the format and return an i32vec4
 * suitable for storing (with components replicated to fill). Unpacks do the
 * reverse but cannot rely on replication.
 *
 * Pure 32 formats (R32F ... RGBA32F) are 32 unpacked, so just need to
 * replicate to fill */

static nir_ssa_def *
pan_pack_pure_32(nir_builder *b, nir_ssa_def *v, unsigned num_components)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i)
                replicated[i] = nir_channel(b, v, i % num_components);

        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_unpack_pure_32(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        return nir_channels(b, pack, (1 << num_components) - 1);
}
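
/* For a two-component R32G32_UINT value (x, y), for instance, packing yields
 * the i32vec4 (x, y, x, y) and unpacking reads back just the first two
 * channels. */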

/* Pure x16 formats are x16 unpacked, so it's similar, but we need to pack
 * upper/lower halves of course */

static nir_ssa_def *
pan_pack_pure_16(nir_builder *b, nir_ssa_def *v, unsigned num_components)
{
        nir_ssa_def *replicated[4];

        for (unsigned i = 0; i < 4; ++i) {
                unsigned c = 2 * i;

                nir_ssa_def *parts[2] = {
                        nir_channel(b, v, (c + 0) % num_components),
                        nir_channel(b, v, (c + 1) % num_components)
                };

                replicated[i] = nir_pack_32_2x16(b, nir_vec(b, parts, 2));
        }

        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_unpack_pure_16(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        nir_ssa_def *unpacked[4];

        assert(num_components <= 4);

        for (unsigned i = 0; i < num_components; i += 2) {
                nir_ssa_def *halves =
                        nir_unpack_32_2x16(b, nir_channel(b, pack, i >> 1));

                unpacked[i + 0] = nir_channel(b, halves, 0);
                unpacked[i + 1] = nir_channel(b, halves, 1);
        }

        for (unsigned i = num_components; i < 4; ++i)
                unpacked[i] = nir_imm_intN_t(b, 0, 16);

        return nir_vec(b, unpacked, 4);
}
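
/* Packing an RGBA16 value (r, g, b, a), for example, produces the 32-bit words
 * pack_32_2x16(r, g) and pack_32_2x16(b, a), replicated to fill the vec4. */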

/* And likewise for x8. pan_fill_4 fills a 4-channel vector with an n-channel
 * vector (n <= 4), replicating as needed. pan_replicate_4 constructs a
 * 4-channel vector from a scalar via replication */

static nir_ssa_def *
pan_fill_4(nir_builder *b, nir_ssa_def *v, unsigned num_components)
{
        nir_ssa_def *q[4];
        assert(v->num_components <= 4);

        for (unsigned j = 0; j < 4; ++j)
                q[j] = nir_channel(b, v, j % num_components);

        return nir_vec(b, q, 4);
}

static nir_ssa_def *
pan_extend(nir_builder *b, nir_ssa_def *v, unsigned N)
{
        nir_ssa_def *q[4];
        assert(v->num_components <= 4);
        assert(N <= 4);

        for (unsigned j = 0; j < v->num_components; ++j)
                q[j] = nir_channel(b, v, j);

        for (unsigned j = v->num_components; j < N; ++j)
                q[j] = nir_imm_intN_t(b, 0, v->bit_size);

        return nir_vec(b, q, N);
}

static nir_ssa_def *
pan_replicate_4(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *replicated[4] = { v, v, v, v };
        return nir_vec(b, replicated, 4);
}

static nir_ssa_def *
pan_pack_pure_8(nir_builder *b, nir_ssa_def *v, unsigned num_components)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b, pan_fill_4(b, v, num_components)));
}

static nir_ssa_def *
pan_unpack_pure_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        return nir_channels(b, unpacked, (1 << num_components) - 1);
}
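
/* So an RGBA8UI value (r, g, b, a) packs (taking pack_32_4x8 to place channel
 * 0 in the low byte) as r | g << 8 | b << 16 | a << 24, replicated across the
 * vec4; unpacking splits that word back into its bytes. */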

/* UNORM 8 is unpacked to f16 vec4. We could directly use the un/pack_unorm_4x8
 * ops provided we replicate appropriately, but for packing we'd rather stay in
 * 8/16-bit whereas the NIR op forces 32-bit, so we do it manually */

static nir_ssa_def *
pan_pack_unorm_8(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_pack_32_4x8(b,
                nir_f2u8(b, nir_fround_even(b, nir_fmul(b, nir_fsat(b,
                        pan_fill_4(b, v, v->num_components)), nir_imm_float16(b, 255.0))))));
}

static nir_ssa_def *
pan_unpack_unorm_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components)
{
        assert(num_components <= 4);
        nir_ssa_def *unpacked = nir_unpack_unorm_4x8(b, nir_channel(b, pack, 0));
        return nir_f2fmp(b, unpacked);
}
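
/* e.g. a saturated component of 1.0 packs to f2u8(round_even(255.0)) = 255 and
 * 0.5 packs to round_even(127.5) = 128, so mid-grey (0.5, 0.5, 0.5, 1.0)
 * becomes the bytes 0x80, 0x80, 0x80, 0xff. */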

/* UNORM 4 is also unpacked to f16, which prevents us from using the shared
 * unpack which strongly assumes fp32. However, on the tilebuffer it is
 * actually packed as:
 *
 *      [AAAA] [0000] [BBBB] [0000] [GGGG] [0000] [RRRR] [0000]
 *
 * In other words, each 4-bit component is byte-aligned and lives in the top
 * nibble of its byte. So pack as:
 *
 *      pack_32_4x8(f2u8_rte(v * 15.0) << 4)
 */

static nir_ssa_def *
pan_pack_unorm_small(nir_builder *b, nir_ssa_def *v,
                nir_ssa_def *scales, nir_ssa_def *shifts)
{
        nir_ssa_def *f = nir_fmul(b, nir_fsat(b, pan_fill_4(b, v, v->num_components)), scales);
        nir_ssa_def *u8 = nir_f2u8(b, nir_fround_even(b, f));
        nir_ssa_def *s = nir_ishl(b, u8, shifts);
        nir_ssa_def *repl = nir_pack_32_4x8(b, s);

        return pan_replicate_4(b, repl);
}

static nir_ssa_def *
pan_unpack_unorm_small(nir_builder *b, nir_ssa_def *pack,
                nir_ssa_def *scales, nir_ssa_def *shifts)
{
        nir_ssa_def *channels = nir_unpack_32_4x8(b, nir_channel(b, pack, 0));
        nir_ssa_def *raw = nir_ushr(b, nir_i2i16(b, channels), shifts);
        return nir_fmul(b, nir_u2f16(b, raw), scales);
}

static nir_ssa_def *
pan_pack_unorm_4(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                nir_imm_vec4_16(b, 15.0, 15.0, 15.0, 15.0),
                nir_imm_ivec4(b, 4, 4, 4, 4));
}

static nir_ssa_def *
pan_unpack_unorm_4(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                nir_imm_vec4_16(b, 1.0 / 15.0, 1.0 / 15.0, 1.0 / 15.0, 1.0 / 15.0),
                nir_imm_ivec4(b, 4, 4, 4, 4));
}
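
/* Worked example: v = (1.0, 0.0, 1.0, 1.0) scales to (15, 0, 15, 15), and the
 * per-byte shift by 4 gives (0xf0, 0x00, 0xf0, 0xf0), i.e. each 4-bit value
 * sits in the top nibble of its byte, matching the layout sketched above. */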

/* UNORM RGB5_A1 and RGB565 are similar */

static nir_ssa_def *
pan_pack_unorm_5551(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                nir_imm_vec4_16(b, 31.0, 31.0, 31.0, 1.0),
                nir_imm_ivec4(b, 3, 3, 3, 7));
}

static nir_ssa_def *
pan_unpack_unorm_5551(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                nir_imm_vec4_16(b, 1.0 / 31.0, 1.0 / 31.0, 1.0 / 31.0, 1.0),
                nir_imm_ivec4(b, 3, 3, 3, 7));
}

static nir_ssa_def *
pan_pack_unorm_565(nir_builder *b, nir_ssa_def *v)
{
        return pan_pack_unorm_small(b, v,
                nir_imm_vec4_16(b, 31.0, 63.0, 31.0, 0.0),
                nir_imm_ivec4(b, 3, 2, 3, 0));
}

static nir_ssa_def *
pan_unpack_unorm_565(nir_builder *b, nir_ssa_def *v)
{
        return pan_unpack_unorm_small(b, v,
                nir_imm_vec4_16(b, 1.0 / 31.0, 1.0 / 63.0, 1.0 / 31.0, 0.0),
                nir_imm_ivec4(b, 3, 2, 3, 0));
}
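
/* In the 5551 case, for instance, a red component of 1.0 scales to 31 and
 * shifts left by 3 to give 0xf8, while an alpha of 1.0 scales to 1 and shifts
 * left by 7 to give 0x80: each channel keeps the top bits of its byte, just
 * like the 4444 layout above. */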

/* RGB10_A2 is packed in the tilebuffer with the bottom three bytes holding the
 * top 8 bits of R, G and B, and the top byte holding the low 2 bits of each
 * colour channel together with the 2-bit alpha. As imirkin pointed out, this
 * means free conversion to RGBX8 */

static nir_ssa_def *
pan_pack_unorm_1010102(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *scale = nir_imm_vec4_16(b, 1023.0, 1023.0, 1023.0, 3.0);
        nir_ssa_def *s = nir_f2u32(b, nir_fround_even(b, nir_f2f32(b, nir_fmul(b, nir_fsat(b, v), scale))));

        nir_ssa_def *top8 = nir_ushr(b, s, nir_imm_ivec4(b, 0x2, 0x2, 0x2, 0x2));
        nir_ssa_def *top8_rgb = nir_pack_32_4x8(b, nir_u2u8(b, top8));

        nir_ssa_def *bottom2 = nir_iand(b, s, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3));

        nir_ssa_def *top =
                nir_ior(b,
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 0), nir_imm_int(b, 24 + 0)),
                                nir_ishl(b, nir_channel(b, bottom2, 1), nir_imm_int(b, 24 + 2))),
                        nir_ior(b,
                                nir_ishl(b, nir_channel(b, bottom2, 2), nir_imm_int(b, 24 + 4)),
                                nir_ishl(b, nir_channel(b, bottom2, 3), nir_imm_int(b, 24 + 6))));

        nir_ssa_def *p = nir_ior(b, top, top8_rgb);
        return pan_replicate_4(b, p);
}

static nir_ssa_def *
pan_unpack_unorm_1010102(nir_builder *b, nir_ssa_def *packed)
{
        nir_ssa_def *p = nir_channel(b, packed, 0);
        nir_ssa_def *bytes = nir_unpack_32_4x8(b, p);
        nir_ssa_def *ubytes = nir_i2i16(b, bytes);

        nir_ssa_def *shifts = nir_ushr(b, pan_replicate_4(b, nir_channel(b, ubytes, 3)),
                        nir_imm_ivec4(b, 0, 2, 4, 6));
        nir_ssa_def *precision = nir_iand(b, shifts,
                        nir_i2i16(b, nir_imm_ivec4(b, 0x3, 0x3, 0x3, 0x3)));

        nir_ssa_def *top_rgb = nir_ishl(b, nir_channels(b, ubytes, 0x7), nir_imm_int(b, 2));
        top_rgb = nir_ior(b, nir_channels(b, precision, 0x7), top_rgb);

        nir_ssa_def *chans[4] = {
                nir_channel(b, top_rgb, 0),
                nir_channel(b, top_rgb, 1),
                nir_channel(b, top_rgb, 2),
                nir_channel(b, precision, 3)
        };

        nir_ssa_def *scale = nir_imm_vec4(b, 1.0 / 1023.0, 1.0 / 1023.0, 1.0 / 1023.0, 1.0 / 3.0);
        return nir_f2fmp(b, nir_fmul(b, nir_u2f32(b, nir_vec(b, chans, 4)), scale));
}
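
/* Opaque white (1.0, 1.0, 1.0, 1.0), for example, scales to (1023, 1023, 1023,
 * 3): the top 8 bits of each colour channel fill the low three bytes with
 * 0xff, and the leftover 2-bit fields (0x3 each, including alpha) fill the top
 * byte, so the packed word is 0xffffffff. */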

/* On the other hand, the pure int RGB10_A2 is identical to the spec */

static nir_ssa_def *
pan_pack_uint_1010102(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *shift = nir_ishl(b, nir_u2u32(b, v),
                        nir_imm_ivec4(b, 0, 10, 20, 30));

        nir_ssa_def *p = nir_ior(b,
                        nir_ior(b, nir_channel(b, shift, 0), nir_channel(b, shift, 1)),
                        nir_ior(b, nir_channel(b, shift, 2), nir_channel(b, shift, 3)));

        return pan_replicate_4(b, p);
}

static nir_ssa_def *
pan_unpack_uint_1010102(nir_builder *b, nir_ssa_def *packed)
{
        nir_ssa_def *chan = nir_channel(b, packed, 0);

        nir_ssa_def *shift = nir_ushr(b, pan_replicate_4(b, chan),
                        nir_imm_ivec4(b, 0, 10, 20, 30));

        nir_ssa_def *mask = nir_iand(b, shift,
                        nir_imm_ivec4(b, 0x3ff, 0x3ff, 0x3ff, 0x3));

        return nir_i2i16(b, mask);
}
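
/* i.e. the packed word is simply r | (g << 10) | (b << 20) | (a << 30), and
 * unpacking shifts and masks each field back out. */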

/* NIR means we can *finally* catch a break */

static nir_ssa_def *
pan_pack_r11g11b10(nir_builder *b, nir_ssa_def *v)
{
        return pan_replicate_4(b, nir_format_pack_11f11f10f(b,
                                nir_f2f32(b, v)));
}

static nir_ssa_def *
pan_unpack_r11g11b10(nir_builder *b, nir_ssa_def *v)
{
        nir_ssa_def *f32 = nir_format_unpack_11f11f10f(b, nir_channel(b, v, 0));
        nir_ssa_def *f16 = nir_f2fmp(b, f32);

        /* Extend to vec4 with alpha */
        nir_ssa_def *components[4] = {
                nir_channel(b, f16, 0),
                nir_channel(b, f16, 1),
                nir_channel(b, f16, 2),
                nir_imm_float16(b, 1.0)
        };

        return nir_vec(b, components, 4);
}

/* Wrapper around sRGB conversion */

static nir_ssa_def *
pan_linear_to_srgb(nir_builder *b, nir_ssa_def *linear)
{
        nir_ssa_def *rgb = nir_channels(b, linear, 0x7);

        /* TODO: fp16 native conversion */
        nir_ssa_def *srgb = nir_f2fmp(b,
                        nir_format_linear_to_srgb(b, nir_f2f32(b, rgb)));

        nir_ssa_def *comp[4] = {
                nir_channel(b, srgb, 0),
                nir_channel(b, srgb, 1),
                nir_channel(b, srgb, 2),
                nir_channel(b, linear, 3),
        };

        return nir_vec(b, comp, 4);
}

static nir_ssa_def *
pan_srgb_to_linear(nir_builder *b, nir_ssa_def *srgb)
{
        nir_ssa_def *rgb = nir_channels(b, srgb, 0x7);

        /* TODO: fp16 native conversion */
        nir_ssa_def *linear = nir_f2fmp(b,
                        nir_format_srgb_to_linear(b, nir_f2f32(b, rgb)));

        nir_ssa_def *comp[4] = {
                nir_channel(b, linear, 0),
                nir_channel(b, linear, 1),
                nir_channel(b, linear, 2),
                nir_channel(b, srgb, 3),
        };

        return nir_vec(b, comp, 4);
}

/* Generic dispatches for un/pack regardless of format */

static bool
pan_is_unorm4(const struct util_format_description *desc)
{
        switch (desc->format) {
        case PIPE_FORMAT_B4G4R4A4_UNORM:
        case PIPE_FORMAT_B4G4R4X4_UNORM:
        case PIPE_FORMAT_A4R4_UNORM:
        case PIPE_FORMAT_R4A4_UNORM:
        case PIPE_FORMAT_A4B4G4R4_UNORM:
        case PIPE_FORMAT_R4G4B4A4_UNORM:
                return true;
        default:
                return false;
        }
}

static nir_ssa_def *
pan_unpack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *packed)
{
        if (util_format_is_unorm8(desc))
                return pan_unpack_unorm_8(b, packed, desc->nr_channels);

        if (pan_is_unorm4(desc))
                return pan_unpack_unorm_4(b, packed);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);
                assert(c >= 0);
                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_unpack_pure_32(b, packed, desc->nr_channels) :
                                pan_unpack_pure_16(b, packed, desc->nr_channels);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_unpack_pure_8(b, packed, desc->nr_channels);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_B5G5R5A1_UNORM:
        case PIPE_FORMAT_R5G5B5A1_UNORM:
                return pan_unpack_unorm_5551(b, packed);
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_unpack_unorm_565(b, packed);
        case PIPE_FORMAT_R10G10B10A2_UNORM:
                return pan_unpack_unorm_1010102(b, packed);
        case PIPE_FORMAT_R10G10B10A2_UINT:
                return pan_unpack_uint_1010102(b, packed);
        case PIPE_FORMAT_R11G11B10_FLOAT:
                return pan_unpack_r11g11b10(b, packed);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}

static nir_ssa_def *
pan_pack(nir_builder *b,
                const struct util_format_description *desc,
                nir_ssa_def *unpacked)
{
        if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
                unpacked = pan_linear_to_srgb(b, unpacked);

        if (util_format_is_unorm8(desc))
                return pan_pack_unorm_8(b, unpacked);

        if (pan_is_unorm4(desc))
                return pan_pack_unorm_4(b, unpacked);

        if (desc->is_array) {
                int c = util_format_get_first_non_void_channel(desc->format);
                assert(c >= 0);
                struct util_format_channel_description d = desc->channel[c];

                if (d.size == 32 || d.size == 16) {
                        assert(!d.normalized);
                        assert(d.type == UTIL_FORMAT_TYPE_FLOAT || d.pure_integer);

                        return d.size == 32 ? pan_pack_pure_32(b, unpacked, desc->nr_channels) :
                                pan_pack_pure_16(b, unpacked, desc->nr_channels);
                } else if (d.size == 8) {
                        assert(d.pure_integer);
                        return pan_pack_pure_8(b, unpacked, desc->nr_channels);
                } else {
                        unreachable("Unrenderable size");
                }
        }

        switch (desc->format) {
        case PIPE_FORMAT_B5G5R5A1_UNORM:
        case PIPE_FORMAT_R5G5B5A1_UNORM:
                return pan_pack_unorm_5551(b, unpacked);
        case PIPE_FORMAT_B5G6R5_UNORM:
                return pan_pack_unorm_565(b, unpacked);
        case PIPE_FORMAT_R10G10B10A2_UNORM:
                return pan_pack_unorm_1010102(b, unpacked);
        case PIPE_FORMAT_R10G10B10A2_UINT:
                return pan_pack_uint_1010102(b, unpacked);
        case PIPE_FORMAT_R11G11B10_FLOAT:
                return pan_pack_r11g11b10(b, unpacked);
        default:
                break;
        }

        fprintf(stderr, "%s\n", desc->name);
        unreachable("Unknown format");
}

static void
pan_lower_fb_store(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                unsigned quirks)
{
        /* For stores, add conversion before */
        nir_ssa_def *unpacked = nir_ssa_for_src(b, intr->src[1], 4);
        nir_ssa_def *packed = pan_pack(b, desc, unpacked);

        nir_store_raw_output_pan(b, packed);
}

static nir_ssa_def *
pan_sample_id(nir_builder *b, int sample)
{
        return (sample >= 0) ? nir_imm_int(b, sample) : nir_load_sample_id(b);
}

static void
pan_lower_fb_load(nir_shader *shader,
                nir_builder *b,
                nir_intrinsic_instr *intr,
                const struct util_format_description *desc,
                unsigned base, int sample, unsigned quirks)
{
        nir_ssa_def *packed =
                nir_load_raw_output_pan(b, 4, 32, pan_sample_id(b, sample),
                                .base = base);

        /* Convert the raw value */
        nir_ssa_def *unpacked = pan_unpack(b, desc, packed);

        if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
                unpacked = pan_srgb_to_linear(b, unpacked);

        /* Convert to the size of the load intrinsic.
         *
         * We can assume that the type will match with the framebuffer format:
         *
         * Page 170 of the PDF of the OpenGL ES 3.0.6 spec says:
         *
         *    If [UNORM or SNORM, convert to fixed-point]; otherwise no type
         *    conversion is applied. If the values written by the fragment
         *    shader do not match the format(s) of the corresponding color
         *    buffer(s), the result is undefined.
         */

        unsigned bits = nir_dest_bit_size(intr->dest);

        nir_alu_type src_type;
        if (desc->channel[0].pure_integer) {
                if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED)
                        src_type = nir_type_int;
                else
                        src_type = nir_type_uint;
        } else {
                src_type = nir_type_float;
        }

        unpacked = nir_convert_to_bit_size(b, unpacked, src_type, bits);
        unpacked = pan_extend(b, unpacked, nir_dest_num_components(intr->dest));

        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa, unpacked, &intr->instr);
}

bool
pan_lower_framebuffer(nir_shader *shader, const enum pipe_format *rt_fmts,
                bool is_blend, unsigned quirks)
{
        if (shader->info.stage != MESA_SHADER_FRAGMENT)
                return false;

        bool progress = false;

        nir_foreach_function(func, shader) {
                nir_foreach_block(block, func->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                if (instr->type != nir_instr_type_intrinsic)
                                        continue;

                                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

                                bool is_load = intr->intrinsic == nir_intrinsic_load_deref;
                                bool is_store = intr->intrinsic == nir_intrinsic_store_deref;

                                if (!(is_load || (is_store && is_blend)))
                                        continue;

                                nir_variable *var = nir_intrinsic_get_var(intr, 0);

                                if (var->data.mode != nir_var_shader_out)
                                        continue;

                                if (var->data.location < FRAG_RESULT_DATA0)
                                        continue;

                                unsigned base = var->data.driver_location;
                                unsigned rt = var->data.location - FRAG_RESULT_DATA0;

                                if (rt_fmts[rt] == PIPE_FORMAT_NONE)
                                        continue;

                                const struct util_format_description *desc =
                                        util_format_description(rt_fmts[rt]);

                                enum pan_format_class fmt_class =
                                        pan_format_class(desc, quirks, is_store);

                                /* Don't lower */
                                if (fmt_class == PAN_FORMAT_NATIVE)
                                        continue;

                                /* EXT_shader_framebuffer_fetch requires
                                 * per-sample loads.
                                 * MSAA blend shaders are not yet handled, so
                                 * for now always load sample 0. */
                                int sample = is_blend ? 0 : -1;

                                nir_builder b;
                                nir_builder_init(&b, func->impl);

                                if (is_store) {
                                        b.cursor = nir_before_instr(instr);
                                        pan_lower_fb_store(shader, &b, intr, desc, quirks);
                                } else {
                                        b.cursor = nir_after_instr(instr);
                                        pan_lower_fb_load(shader, &b, intr, desc, base, sample, quirks);
                                }

                                nir_instr_remove(instr);

                                progress = true;
                        }
                }

                nir_metadata_preserve(func->impl, nir_metadata_block_index |
                                nir_metadata_dominance);
        }

        return progress;
}
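
/* Drivers typically run this pass when compiling a fragment or blend shader,
 * once the render-target formats and GPU quirks are known. A minimal sketch
 * (rt_formats and gpu_quirks here are hypothetical driver-side values):
 *
 *    NIR_PASS_V(nir, pan_lower_framebuffer, rt_formats, is_blend, gpu_quirks);
 */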