GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/radeonsi/si_clear.c
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "sid.h"
#include "util/format/u_format.h"
#include "util/u_pack_color.h"
#include "util/u_surface.h"

enum
{
   SI_CLEAR = SI_SAVE_FRAGMENT_STATE,
   SI_CLEAR_SURFACE = SI_SAVE_FRAMEBUFFER | SI_SAVE_FRAGMENT_STATE,
};

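/* Fill out a si_clear_info record describing a full clear of a buffer range.
 * The write mask defaults to all bits and is_dcc_msaa defaults to false.
 */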
void si_init_buffer_clear(struct si_clear_info *info,
                          struct pipe_resource *resource, uint64_t offset,
                          uint32_t size, uint32_t clear_value)
{
   info->resource = resource;
   info->offset = offset;
   info->size = size;
   info->clear_value = clear_value;
   info->writemask = 0xffffffff;
   info->is_dcc_msaa = false;
}

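/* Same as si_init_buffer_clear, but with a partial write mask so the clear is
 * executed as a read-modify-write (used below for HTILE clears that must leave
 * either the depth or the stencil bits untouched).
 */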
static void si_init_buffer_clear_rmw(struct si_clear_info *info,
                                     struct pipe_resource *resource, uint64_t offset,
                                     uint32_t size, uint32_t clear_value, uint32_t writemask)
{
   si_init_buffer_clear(info, resource, offset, size, clear_value);
   info->writemask = writemask;
}

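/* Submit all gathered clears: flush the metadata caches they depend on,
 * execute each clear with a compute shader, and then wait for completion.
 */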
void si_execute_clears(struct si_context *sctx, struct si_clear_info *info,
                       unsigned num_clears, unsigned types)
{
   if (!num_clears)
      return;

   /* Flush caches and wait for idle. */
   if (types & (SI_CLEAR_TYPE_CMASK | SI_CLEAR_TYPE_DCC))
      sctx->flags |= si_get_flush_flags(sctx, SI_COHERENCY_CB_META, L2_LRU);

   if (types & SI_CLEAR_TYPE_HTILE)
      sctx->flags |= si_get_flush_flags(sctx, SI_COHERENCY_DB_META, L2_LRU);

   /* Flush caches in case we use compute. */
   sctx->flags |= SI_CONTEXT_INV_VCACHE;

   /* GFX6-8: CB and DB don't use L2. */
   if (sctx->chip_class <= GFX8)
      sctx->flags |= SI_CONTEXT_INV_L2;

   /* Execute clears. */
   for (unsigned i = 0; i < num_clears; i++) {
      if (info[i].is_dcc_msaa) {
         gfx9_clear_dcc_msaa(sctx, info[i].resource, info[i].clear_value,
                             SI_OP_SKIP_CACHE_INV_BEFORE, SI_COHERENCY_CP);
         continue;
      }

      assert(info[i].size > 0);

      if (info[i].writemask != 0xffffffff) {
         si_compute_clear_buffer_rmw(sctx, info[i].resource, info[i].offset, info[i].size,
                                     info[i].clear_value, info[i].writemask,
                                     SI_OP_SKIP_CACHE_INV_BEFORE, SI_COHERENCY_CP);
      } else {
         /* Compute shaders are much faster on both dGPUs and APUs. Don't use CP DMA. */
         si_clear_buffer(sctx, info[i].resource, info[i].offset, info[i].size,
                         &info[i].clear_value, 4, SI_OP_SKIP_CACHE_INV_BEFORE,
                         SI_COHERENCY_CP, SI_COMPUTE_CLEAR_METHOD);
      }
   }

   /* Wait for idle. */
   sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;

   /* GFX6-8: CB and DB don't use L2. */
   if (sctx->chip_class <= GFX8)
      sctx->flags |= SI_CONTEXT_WB_L2;
}

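/* Allocate a separate CMASK buffer on demand so the texture can use fast
 * color clears. Returns false if CMASK isn't supported for this surface or
 * the allocation fails.
 */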
static bool si_alloc_separate_cmask(struct si_screen *sscreen, struct si_texture *tex)
{
   /* CMASK for MSAA is allocated in advance or always disabled
    * by the "nofmask" option.
    */
   if (tex->cmask_buffer)
      return true;

   if (!tex->surface.cmask_size)
      return false;

   tex->cmask_buffer =
      si_aligned_buffer_create(&sscreen->b, SI_RESOURCE_FLAG_UNMAPPABLE, PIPE_USAGE_DEFAULT,
                               tex->surface.cmask_size, 1 << tex->surface.cmask_alignment_log2);
   if (tex->cmask_buffer == NULL)
      return false;

   tex->cmask_base_address_reg = tex->cmask_buffer->gpu_address >> 8;
   tex->cb_color_info |= S_028C70_FAST_CLEAR(1);

   p_atomic_inc(&sscreen->compressed_colortex_counter);
   return true;
}

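/* Store the packed clear color in the texture (the value later programmed
 * into the CB clear color registers). Returns true if the value changed and
 * the framebuffer state therefore needs to be re-emitted.
 */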
static bool si_set_clear_color(struct si_texture *tex, enum pipe_format surface_format,
                               const union pipe_color_union *color)
{
   union util_color uc;

   memset(&uc, 0, sizeof(uc));

   if (tex->surface.bpe == 16) {
      /* DCC fast clear only:
       *   CLEAR_WORD0 = R = G = B
       *   CLEAR_WORD1 = A
       */
      assert(color->ui[0] == color->ui[1] && color->ui[0] == color->ui[2]);
      uc.ui[0] = color->ui[0];
      uc.ui[1] = color->ui[3];
   } else {
      if (tex->swap_rgb_to_bgr)
         surface_format = util_format_rgb_to_bgr(surface_format);

      util_pack_color_union(surface_format, &uc, color);
   }

   if (memcmp(tex->color_clear_value, &uc, 2 * sizeof(uint32_t)) == 0)
      return false;

   memcpy(tex->color_clear_value, &uc, 2 * sizeof(uint32_t));
   return true;
}

/** Linearize and convert luminance/intensity to red. */
enum pipe_format si_simplify_cb_format(enum pipe_format format)
{
   format = util_format_linear(format);
   format = util_format_luminance_to_red(format);
   return util_format_intensity_to_red(format);
}

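/* Return true if the alpha channel ends up in the most significant bits of
 * the packed pixel for this colorbuffer format.
 */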
bool vi_alpha_is_on_msb(struct si_screen *sscreen, enum pipe_format format)
{
   format = si_simplify_cb_format(format);
   const struct util_format_description *desc = util_format_description(format);

   /* Formats with 3 channels can't have alpha. */
   if (desc->nr_channels == 3)
      return true; /* same as xxxA; is any value OK here? */

   if (sscreen->info.chip_class >= GFX10 && desc->nr_channels == 1)
      return desc->swizzle[3] == PIPE_SWIZZLE_X;

   return si_translate_colorswap(format, false) <= 1;
}

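/* Pick the DCC clear code for the given clear color and report whether an
 * ELIMINATE_FAST_CLEAR pass is still required. Returns false if a DCC fast
 * clear cannot be used for this color at all.
 */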
static bool vi_get_fast_clear_parameters(struct si_screen *sscreen, enum pipe_format base_format,
                                         enum pipe_format surface_format,
                                         const union pipe_color_union *color, uint32_t *clear_value,
                                         bool *eliminate_needed)
{
   /* If we want to clear without needing a fast clear eliminate step, we
    * can set color and alpha independently to 0 or 1 (or 0/max for integer
    * formats).
    */
   bool values[4] = {};      /* whether to clear to 0 or 1 */
   bool color_value = false; /* clear color to 0 or 1 */
   bool alpha_value = false; /* clear alpha to 0 or 1 */
   int alpha_channel;        /* index of the alpha component */
   bool has_color = false;
   bool has_alpha = false;

   const struct util_format_description *desc =
      util_format_description(si_simplify_cb_format(surface_format));

   /* 128-bit fast clear with different R,G,B values is unsupported. */
   if (desc->block.bits == 128 && (color->ui[0] != color->ui[1] || color->ui[0] != color->ui[2]))
      return false;

   *eliminate_needed = true;
   *clear_value = DCC_CLEAR_COLOR_REG;

   if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
      return true; /* need ELIMINATE_FAST_CLEAR */

   bool base_alpha_is_on_msb = vi_alpha_is_on_msb(sscreen, base_format);
   bool surf_alpha_is_on_msb = vi_alpha_is_on_msb(sscreen, surface_format);

   /* Formats with 3 channels can't have alpha. */
   if (desc->nr_channels == 3)
      alpha_channel = -1;
   else if (surf_alpha_is_on_msb)
      alpha_channel = desc->nr_channels - 1;
   else
      alpha_channel = 0;

   for (int i = 0; i < 4; ++i) {
      if (desc->swizzle[i] >= PIPE_SWIZZLE_0)
         continue;

      if (desc->channel[i].pure_integer && desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
         /* Use the maximum value for clamping the clear color. */
         int max = u_bit_consecutive(0, desc->channel[i].size - 1);

         values[i] = color->i[i] != 0;
         if (color->i[i] != 0 && MIN2(color->i[i], max) != max)
            return true; /* need ELIMINATE_FAST_CLEAR */
      } else if (desc->channel[i].pure_integer &&
                 desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
         /* Use the maximum value for clamping the clear color. */
         unsigned max = u_bit_consecutive(0, desc->channel[i].size);

         values[i] = color->ui[i] != 0U;
         if (color->ui[i] != 0U && MIN2(color->ui[i], max) != max)
            return true; /* need ELIMINATE_FAST_CLEAR */
      } else {
         values[i] = color->f[i] != 0.0F;
         if (color->f[i] != 0.0F && color->f[i] != 1.0F)
            return true; /* need ELIMINATE_FAST_CLEAR */
      }

      if (desc->swizzle[i] == alpha_channel) {
         alpha_value = values[i];
         has_alpha = true;
      } else {
         color_value = values[i];
         has_color = true;
      }
   }

   /* If alpha isn't present, make it the same as color, and vice versa. */
   if (!has_alpha)
      alpha_value = color_value;
   else if (!has_color)
      color_value = alpha_value;

   if (color_value != alpha_value && base_alpha_is_on_msb != surf_alpha_is_on_msb)
      return true; /* require ELIMINATE_FAST_CLEAR */

   /* Check if all color values are equal if they are present. */
   for (int i = 0; i < 4; ++i) {
      if (desc->swizzle[i] <= PIPE_SWIZZLE_W && desc->swizzle[i] != alpha_channel &&
          values[i] != color_value)
         return true; /* require ELIMINATE_FAST_CLEAR */
   }

   /* This doesn't need ELIMINATE_FAST_CLEAR.
    * On chips predating Raven2, the DCC clear codes and the CB clear
    * color registers must match.
    */
   *eliminate_needed = false;

   if (color_value) {
      if (alpha_value)
         *clear_value = DCC_CLEAR_COLOR_1111;
      else
         *clear_value = DCC_CLEAR_COLOR_1110;
   } else {
      if (alpha_value)
         *clear_value = DCC_CLEAR_COLOR_0001;
      else
         *clear_value = DCC_CLEAR_COLOR_0000;
   }
   return true;
}

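/* Describe which part of the DCC metadata has to be cleared for the given
 * mip level and fill out *out accordingly. Returns false for cases that
 * aren't implemented.
 */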
bool vi_dcc_get_clear_info(struct si_context *sctx, struct si_texture *tex, unsigned level,
                           unsigned clear_value, struct si_clear_info *out)
{
   struct pipe_resource *dcc_buffer = &tex->buffer.b.b;
   uint64_t dcc_offset = tex->surface.meta_offset;
   uint32_t clear_size;

   assert(vi_dcc_enabled(tex, level));

   if (sctx->chip_class >= GFX10) {
      /* 4x and 8x MSAA need a sophisticated compute shader for
       * the clear. */
      if (tex->buffer.b.b.nr_storage_samples >= 4)
         return false;

      unsigned num_layers = util_num_layers(&tex->buffer.b.b, level);

      if (num_layers == 1) {
         /* Clear a specific level. */
         dcc_offset += tex->surface.u.gfx9.meta_levels[level].offset;
         clear_size = tex->surface.u.gfx9.meta_levels[level].size;
      } else if (tex->buffer.b.b.last_level == 0) {
         /* Clear all layers having only 1 level. */
         clear_size = tex->surface.meta_size;
      } else {
         /* Clearing DCC with both multiple levels and multiple layers is not
          * implemented.
          */
         return false;
      }
   } else if (sctx->chip_class == GFX9) {
      /* TODO: Implement DCC fast clear for level 0 of mipmapped textures. Mipmapped
       * DCC has to clear a rectangular area of DCC for level 0 (because the whole miptree
       * is organized in a 2D plane).
       */
      if (tex->buffer.b.b.last_level > 0)
         return false;

      /* 4x and 8x MSAA need to clear only sample 0 and 1 in a compute shader and leave other
       * samples untouched. (only the first 2 samples are compressed) */
      if (tex->buffer.b.b.nr_storage_samples >= 4) {
         si_init_buffer_clear(out, dcc_buffer, 0, 0, clear_value);
         out->is_dcc_msaa = true;
         return true;
      }

      clear_size = tex->surface.meta_size;
   } else {
      unsigned num_layers = util_num_layers(&tex->buffer.b.b, level);

      /* If this is 0, fast clear isn't possible. (can occur with MSAA) */
      if (!tex->surface.u.legacy.color.dcc_level[level].dcc_fast_clear_size)
         return false;

      /* Layered 4x and 8x MSAA DCC fast clears need to clear
       * dcc_fast_clear_size bytes for each layer. A compute shader
       * would be more efficient than separate per-layer clear operations.
       */
      if (tex->buffer.b.b.nr_storage_samples >= 4 && num_layers > 1)
         return false;

      dcc_offset += tex->surface.u.legacy.color.dcc_level[level].dcc_offset;
      clear_size = tex->surface.u.legacy.color.dcc_level[level].dcc_fast_clear_size * num_layers;
   }

   si_init_buffer_clear(out, dcc_buffer, dcc_offset, clear_size, clear_value);
   return true;
}

/* Set the same micro tile mode as the destination of the last MSAA resolve.
 * This allows hitting the MSAA resolve fast path, which requires that both
 * src and dst micro tile modes match.
 */
static void si_set_optimal_micro_tile_mode(struct si_screen *sscreen, struct si_texture *tex)
{
   if (sscreen->info.chip_class >= GFX10 || tex->buffer.b.is_shared ||
       tex->buffer.b.b.nr_samples <= 1 ||
       tex->surface.micro_tile_mode == tex->last_msaa_resolve_target_micro_mode)
      return;

   assert(sscreen->info.chip_class >= GFX9 ||
          tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);
   assert(tex->buffer.b.b.last_level == 0);

   if (sscreen->info.chip_class >= GFX9) {
      /* 4K or larger tiles only. 0 is linear. 1-3 are 256B tiles. */
      assert(tex->surface.u.gfx9.swizzle_mode >= 4);

      /* If you do swizzle_mode % 4, you'll get:
       * 0 = Depth
       * 1 = Standard
       * 2 = Displayable
       * 3 = Rotated
       *
       * Depth-sample order isn't allowed:
       */
      assert(tex->surface.u.gfx9.swizzle_mode % 4 != 0);

      switch (tex->last_msaa_resolve_target_micro_mode) {
      case RADEON_MICRO_MODE_DISPLAY:
         tex->surface.u.gfx9.swizzle_mode &= ~0x3;
         tex->surface.u.gfx9.swizzle_mode += 2; /* D */
         break;
      case RADEON_MICRO_MODE_STANDARD:
         tex->surface.u.gfx9.swizzle_mode &= ~0x3;
         tex->surface.u.gfx9.swizzle_mode += 1; /* S */
         break;
      case RADEON_MICRO_MODE_RENDER:
         tex->surface.u.gfx9.swizzle_mode &= ~0x3;
         tex->surface.u.gfx9.swizzle_mode += 3; /* R */
         break;
      default: /* depth */
         assert(!"unexpected micro mode");
         return;
      }
   } else if (sscreen->info.chip_class >= GFX7) {
      /* These magic numbers were copied from addrlib. It doesn't use
       * any definitions for them either. They are all 2D_TILED_THIN1
       * modes with different bpp and micro tile mode.
       */
      switch (tex->last_msaa_resolve_target_micro_mode) {
      case RADEON_MICRO_MODE_DISPLAY:
         tex->surface.u.legacy.tiling_index[0] = 10;
         break;
      case RADEON_MICRO_MODE_STANDARD:
         tex->surface.u.legacy.tiling_index[0] = 14;
         break;
      case RADEON_MICRO_MODE_RENDER:
         tex->surface.u.legacy.tiling_index[0] = 28;
         break;
      default: /* depth, thick */
         assert(!"unexpected micro mode");
         return;
      }
   } else { /* GFX6 */
      switch (tex->last_msaa_resolve_target_micro_mode) {
      case RADEON_MICRO_MODE_DISPLAY:
         switch (tex->surface.bpe) {
         case 1:
            tex->surface.u.legacy.tiling_index[0] = 10;
            break;
         case 2:
            tex->surface.u.legacy.tiling_index[0] = 11;
            break;
         default: /* 4, 8 */
            tex->surface.u.legacy.tiling_index[0] = 12;
            break;
         }
         break;
      case RADEON_MICRO_MODE_STANDARD:
         switch (tex->surface.bpe) {
         case 1:
            tex->surface.u.legacy.tiling_index[0] = 14;
            break;
         case 2:
            tex->surface.u.legacy.tiling_index[0] = 15;
            break;
         case 4:
            tex->surface.u.legacy.tiling_index[0] = 16;
            break;
         default: /* 8, 16 */
            tex->surface.u.legacy.tiling_index[0] = 17;
            break;
         }
         break;
      default: /* depth, thick */
         assert(!"unexpected micro mode");
         return;
      }
   }

   tex->surface.micro_tile_mode = tex->last_msaa_resolve_target_micro_mode;

   p_atomic_inc(&sscreen->dirty_tex_counter);
}

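/* Build the 32-bit HTILE value that encodes a cleared tile for the given
 * depth, using either the Z-only or the Z+S HTILE encoding described below.
 */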
static uint32_t si_get_htile_clear_value(struct si_texture *tex, float depth)
{
   /* Maximum 14-bit UINT value. */
   const uint32_t max_z_value = 0x3FFF;

   /* For clears, Zmask and Smem will always be set to zero. */
   const uint32_t zmask = 0;
   const uint32_t smem = 0;

   /* Convert depthValue to 14-bit zmin/zmax uint values. */
   const uint32_t zmin = (depth * max_z_value) + 0.5f;
   const uint32_t zmax = zmin;

   if (tex->htile_stencil_disabled) {
      /* Z-only HTILE is laid out as follows:
       * |31     18|17      4|3     0|
       * +---------+---------+-------+
       * |  Max Z  |  Min Z  | ZMask |
       */
      return ((zmax & 0x3FFF) << 18) |
             ((zmin & 0x3FFF) << 4) |
             ((zmask & 0xF) << 0);
   } else {
      /* Z+S HTILE is laid out as follows:
       * |31       12|11 10|9    8|7   6|5   4|3     0|
       * +-----------+-----+------+-----+-----+-------+
       * |  Z Range  |     | SMem | SR1 | SR0 | ZMask |
       *
       * The base value for zRange is either zMax or zMin, depending on ZRANGE_PRECISION.
       * For a fast clear, zMin == zMax == clearValue. This means that the base will
       * always be the clear value (converted to 14-bit UINT).
       *
       * When abs(zMax-zMin) < 16, the delta is equal to the difference. In the case of
       * fast clears, where zMax == zMin, the delta is always zero.
       */
      const uint32_t delta = 0;
      const uint32_t zrange = (zmax << 6) | delta;

      /* SResults 0 & 1 are set based on the stencil compare state.
       * For fast-clear, the default values of sr0 and sr1 are both 0x3.
       */
      const uint32_t sresults = 0xf;

      return ((zrange & 0xFFFFF) << 12) |
             ((smem & 0x3) << 8) |
             ((sresults & 0xF) << 4) |
             ((zmask & 0xF) << 0);
   }
}

static bool si_can_fast_clear_depth(struct si_texture *zstex, unsigned level, float depth,
                                    unsigned buffers)
{
   /* TC-compatible HTILE only supports depth clears to 0 or 1. */
   return buffers & PIPE_CLEAR_DEPTH &&
          si_htile_enabled(zstex, level, PIPE_MASK_Z) &&
          (!zstex->tc_compatible_htile || depth == 0 || depth == 1);
}

static bool si_can_fast_clear_stencil(struct si_texture *zstex, unsigned level, uint8_t stencil,
                                      unsigned buffers)
{
   /* TC-compatible HTILE only supports stencil clears to 0. */
   return buffers & PIPE_CLEAR_STENCIL &&
          si_htile_enabled(zstex, level, PIPE_MASK_S) &&
          (!zstex->tc_compatible_htile || stencil == 0);
}

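/* Try to turn the requested clears into metadata fast clears (DCC, CMASK,
 * HTILE). Any bit that was handled here is removed from *buffers; whatever
 * remains is cleared by the caller with the draw-based path.
 */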
static void si_fast_clear(struct si_context *sctx, unsigned *buffers,
                          const union pipe_color_union *color, float depth, uint8_t stencil)
{
   struct pipe_framebuffer_state *fb = &sctx->framebuffer.state;
   struct si_clear_info info[8 * 2 + 1]; /* MRTs * (CMASK + DCC) + ZS */
   unsigned num_clears = 0;
   unsigned clear_types = 0;
   unsigned num_pixels = fb->width * fb->height;

   /* This function is broken in BE, so just disable this path for now */
#if UTIL_ARCH_BIG_ENDIAN
   return;
#endif

   if (sctx->render_cond)
      return;

   /* Gather information about what to clear. */
   unsigned color_buffer_mask = (*buffers & PIPE_CLEAR_COLOR) >> util_logbase2(PIPE_CLEAR_COLOR0);
   while (color_buffer_mask) {
      unsigned i = u_bit_scan(&color_buffer_mask);

      struct si_texture *tex = (struct si_texture *)fb->cbufs[i]->texture;
      unsigned level = fb->cbufs[i]->u.tex.level;
      unsigned num_layers = util_num_layers(&tex->buffer.b.b, level);

      /* the clear is allowed if all layers are bound */
      if (fb->cbufs[i]->u.tex.first_layer != 0 ||
          fb->cbufs[i]->u.tex.last_layer != num_layers - 1) {
         continue;
      }

      /* We can change the micro tile mode before a full clear. */
      /* This is only used for MSAA textures when clearing all layers. */
      si_set_optimal_micro_tile_mode(sctx->screen, tex);

      if (tex->swap_rgb_to_bgr_on_next_clear) {
         assert(!tex->swap_rgb_to_bgr);
         assert(tex->buffer.b.b.nr_samples >= 2);
         tex->swap_rgb_to_bgr = true;
         tex->swap_rgb_to_bgr_on_next_clear = false;

         /* Update all sampler views and images. */
         p_atomic_inc(&sctx->screen->dirty_tex_counter);
      }

      /* only supported on tiled surfaces */
      if (tex->surface.is_linear) {
         continue;
      }

      if (sctx->chip_class <= GFX8 && tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
          !sctx->screen->info.htile_cmask_support_1d_tiling)
         continue;

      /* Use a slow clear for small surfaces where the cost of
       * the eliminate pass can be higher than the benefit of fast
       * clear. The closed driver does this, but the numbers may differ.
       *
       * This helps on both dGPUs and APUs, even small APUs like Mullins.
       */
      bool fb_too_small = num_pixels * num_layers <= 512 * 512;
      bool too_small = tex->buffer.b.b.nr_samples <= 1 && fb_too_small;
      bool eliminate_needed = false;
      bool fmask_decompress_needed = false;

      /* Try to clear DCC first, otherwise try CMASK. */
      if (vi_dcc_enabled(tex, level)) {
         uint32_t reset_value;

         if (sctx->screen->debug_flags & DBG(NO_DCC_CLEAR))
            continue;

         if (!vi_get_fast_clear_parameters(sctx->screen, tex->buffer.b.b.format,
                                           fb->cbufs[i]->format, color, &reset_value,
                                           &eliminate_needed))
            continue;

         /* Shared textures can't use fast clear without an explicit flush
          * because the clear color is not exported.
          *
          * Chips without DCC constant encoding must set the clear color registers
          * correctly even if the fast clear eliminate pass is not needed.
          */
         if ((eliminate_needed || !sctx->screen->info.has_dcc_constant_encode) &&
             tex->buffer.b.is_shared &&
             !(tex->buffer.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
            continue;

         if (eliminate_needed && too_small)
            continue;

         /* We can clear any level, but we only set up the clear value registers for the first
          * level. Therefore, all other levels can be cleared only if the clear value registers
          * are not used, which is only the case with DCC constant encoding and 0/1 clear values.
          */
         if (level > 0 && (eliminate_needed || !sctx->screen->info.has_dcc_constant_encode))
            continue;

         if (tex->buffer.b.b.nr_samples >= 2 && eliminate_needed &&
             !sctx->screen->allow_dcc_msaa_clear_to_reg_for_bpp[util_logbase2(tex->surface.bpe)])
            continue;

         assert(num_clears < ARRAY_SIZE(info));

         if (!vi_dcc_get_clear_info(sctx, tex, level, reset_value, &info[num_clears]))
            continue;

         num_clears++;
         clear_types |= SI_CLEAR_TYPE_DCC;

         si_mark_display_dcc_dirty(sctx, tex);

         /* DCC fast clear with MSAA should clear CMASK to 0xC. */
         if (tex->buffer.b.b.nr_samples >= 2 && tex->cmask_buffer) {
            assert(num_clears < ARRAY_SIZE(info));
            si_init_buffer_clear(&info[num_clears++], &tex->cmask_buffer->b.b,
                                 tex->surface.cmask_offset, tex->surface.cmask_size, 0xCCCCCCCC);
            clear_types |= SI_CLEAR_TYPE_CMASK;
            fmask_decompress_needed = true;
         }
      } else {
         if (level > 0)
            continue;

         /* Shared textures can't use fast clear without an explicit flush
          * because the clear color is not exported.
          */
         if (tex->buffer.b.is_shared &&
             !(tex->buffer.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
            continue;

         if (too_small)
            continue;

         /* 128-bit formats are unsupported */
         if (tex->surface.bpe > 8) {
            continue;
         }

         /* RB+ doesn't work with CMASK fast clear on Stoney. */
         if (sctx->family == CHIP_STONEY)
            continue;

         /* Disable fast clear if tex is encrypted */
         if (tex->buffer.flags & RADEON_FLAG_ENCRYPTED)
            continue;

         uint64_t cmask_offset = 0;
         unsigned clear_size = 0;

         if (sctx->chip_class >= GFX10) {
            assert(level == 0);

            /* Clearing CMASK with both multiple levels and multiple layers is not
             * implemented.
             */
            if (num_layers > 1 && tex->buffer.b.b.last_level > 0)
               continue;

            if (!si_alloc_separate_cmask(sctx->screen, tex))
               continue;

            if (num_layers == 1) {
               /* Clear level 0. */
               cmask_offset = tex->surface.cmask_offset + tex->surface.u.gfx9.color.cmask_level0.offset;
               clear_size = tex->surface.u.gfx9.color.cmask_level0.size;
            } else if (tex->buffer.b.b.last_level == 0) {
               /* Clear all layers having only 1 level. */
               cmask_offset = tex->surface.cmask_offset;
               clear_size = tex->surface.cmask_size;
            } else {
               assert(0); /* this is prevented above */
            }
         } else if (sctx->chip_class == GFX9) {
            /* TODO: Implement CMASK fast clear for level 0 of mipmapped textures. Mipmapped
             * CMASK has to clear a rectangular area of CMASK for level 0 (because the whole
             * miptree is organized in a 2D plane).
             */
            if (tex->buffer.b.b.last_level > 0)
               continue;

            if (!si_alloc_separate_cmask(sctx->screen, tex))
               continue;

            cmask_offset = tex->surface.cmask_offset;
            clear_size = tex->surface.cmask_size;
         } else {
            if (!si_alloc_separate_cmask(sctx->screen, tex))
               continue;

            /* GFX6-8: This only covers mipmap level 0. */
            cmask_offset = tex->surface.cmask_offset;
            clear_size = tex->surface.cmask_size;
         }

         /* Do the fast clear. */
         assert(num_clears < ARRAY_SIZE(info));
         si_init_buffer_clear(&info[num_clears++], &tex->cmask_buffer->b.b,
                              cmask_offset, clear_size, 0);
         clear_types |= SI_CLEAR_TYPE_CMASK;
         eliminate_needed = true;
      }

      if ((eliminate_needed || fmask_decompress_needed) &&
          !(tex->dirty_level_mask & (1 << level))) {
         tex->dirty_level_mask |= 1 << level;
         p_atomic_inc(&sctx->screen->compressed_colortex_counter);
      }

      *buffers &= ~(PIPE_CLEAR_COLOR0 << i);

      /* Chips with DCC constant encoding don't need to set the clear
       * color registers for DCC clear values 0 and 1.
       */
      if (sctx->screen->info.has_dcc_constant_encode && !eliminate_needed)
         continue;

      if (si_set_clear_color(tex, fb->cbufs[i]->format, color)) {
         sctx->framebuffer.dirty_cbufs |= 1 << i;
         si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
      }
   }

   /* Depth/stencil clears. */
   struct pipe_surface *zsbuf = fb->zsbuf;
   struct si_texture *zstex = zsbuf ? (struct si_texture *)zsbuf->texture : NULL;
   unsigned zs_num_layers = zstex ? util_num_layers(&zstex->buffer.b.b, zsbuf->u.tex.level) : 0;

   if (zstex && zsbuf->u.tex.first_layer == 0 &&
       zsbuf->u.tex.last_layer == zs_num_layers - 1 &&
       si_htile_enabled(zstex, zsbuf->u.tex.level, PIPE_MASK_ZS)) {
      unsigned level = zsbuf->u.tex.level;
      bool update_db_depth_clear = false;
      bool update_db_stencil_clear = false;
      bool fb_too_small = num_pixels * zs_num_layers <= 512 * 512;

      /* Transition from TC-incompatible to TC-compatible HTILE if requested. */
      if (zstex->enable_tc_compatible_htile_next_clear) {
         /* If both depth and stencil are present, they must be cleared together. */
         if ((*buffers & PIPE_CLEAR_DEPTHSTENCIL) == PIPE_CLEAR_DEPTHSTENCIL ||
             (*buffers & PIPE_CLEAR_DEPTH && (!zstex->surface.has_stencil ||
                                              zstex->htile_stencil_disabled))) {
            /* The conversion from TC-incompatible to TC-compatible can only be done in one clear. */
            assert(zstex->buffer.b.b.last_level == 0);
            assert(!zstex->tc_compatible_htile);

            /* Enable TC-compatible HTILE. */
            zstex->enable_tc_compatible_htile_next_clear = false;
            zstex->tc_compatible_htile = true;

            /* Update the framebuffer state to reflect the change. */
            sctx->framebuffer.DB_has_shader_readable_metadata = true;
            sctx->framebuffer.dirty_zsbuf = true;
            si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);

            /* Update all sampler views and shader images in all contexts. */
            p_atomic_inc(&sctx->screen->dirty_tex_counter);

            /* Perform the clear here if possible, else clear to uncompressed. */
            uint32_t clear_value;

            if (zstex->htile_stencil_disabled || !zstex->surface.has_stencil) {
               if (si_can_fast_clear_depth(zstex, level, depth, *buffers)) {
                  /* Z-only clear. */
                  clear_value = si_get_htile_clear_value(zstex, depth);
                  *buffers &= ~PIPE_CLEAR_DEPTH;
                  zstex->depth_cleared_level_mask_once |= BITFIELD_BIT(level);
                  update_db_depth_clear = true;
               }
            } else if ((*buffers & PIPE_CLEAR_DEPTHSTENCIL) == PIPE_CLEAR_DEPTHSTENCIL) {
               if (si_can_fast_clear_depth(zstex, level, depth, *buffers) &&
                   si_can_fast_clear_stencil(zstex, level, stencil, *buffers)) {
                  /* Combined Z+S clear. */
                  clear_value = si_get_htile_clear_value(zstex, depth);
                  *buffers &= ~PIPE_CLEAR_DEPTHSTENCIL;
                  zstex->depth_cleared_level_mask_once |= BITFIELD_BIT(level);
                  zstex->stencil_cleared_level_mask |= BITFIELD_BIT(level);
                  update_db_depth_clear = true;
                  update_db_stencil_clear = true;
               }
            }

            if (!update_db_depth_clear) {
               /* Clear to uncompressed, so that it doesn't contain values incompatible
                * with the new TC-compatible HTILE setting.
                *
                * 0xfffff30f = uncompressed Z + S
                * 0xfffc000f = uncompressed Z only
                */
               clear_value = !zstex->htile_stencil_disabled ? 0xfffff30f : 0xfffc000f;
            }

            assert(num_clears < ARRAY_SIZE(info));
            si_init_buffer_clear(&info[num_clears++], &zstex->buffer.b.b,
                                 zstex->surface.meta_offset, zstex->surface.meta_size, clear_value);
            clear_types |= SI_CLEAR_TYPE_HTILE;
         }
      } else if (num_clears || !fb_too_small) {
         /* This is where the HTILE buffer clear is done.
          *
          * If there is no clear scheduled and the framebuffer size is too small, we should use
          * the draw-based clear, which doesn't require waits. If there is some other clear
          * scheduled, we will have to wait anyway, so add the HTILE buffer clear to the batch here.
          * If the framebuffer size is large enough, use this codepath too.
          */
         uint64_t htile_offset = zstex->surface.meta_offset;
         unsigned htile_size = 0;

         /* Determine the HTILE subset to clear. */
         if (sctx->chip_class >= GFX10) {
            /* This can only clear a layered texture with 1 level or a mipmap texture
             * with 1 layer. Other cases are unimplemented.
             */
            if (zs_num_layers == 1) {
               /* Clear a specific level. */
               htile_offset += zstex->surface.u.gfx9.meta_levels[level].offset;
               htile_size = zstex->surface.u.gfx9.meta_levels[level].size;
            } else if (zstex->buffer.b.b.last_level == 0) {
               /* Clear all layers having only 1 level. */
               htile_size = zstex->surface.meta_size;
            }
         } else {
            /* This can only clear a layered texture with 1 level. Other cases are
             * unimplemented.
             */
            if (zstex->buffer.b.b.last_level == 0)
               htile_size = zstex->surface.meta_size;
         }

         /* Perform the clear if it's possible. */
         if (zstex->htile_stencil_disabled || !zstex->surface.has_stencil) {
            if (htile_size &&
                si_can_fast_clear_depth(zstex, level, depth, *buffers)) {
               /* Z-only clear. */
               assert(num_clears < ARRAY_SIZE(info));
               si_init_buffer_clear(&info[num_clears++], &zstex->buffer.b.b, htile_offset,
                                    htile_size, si_get_htile_clear_value(zstex, depth));
               clear_types |= SI_CLEAR_TYPE_HTILE;
               *buffers &= ~PIPE_CLEAR_DEPTH;
               zstex->depth_cleared_level_mask_once |= BITFIELD_BIT(level);
               update_db_depth_clear = true;
            }
         } else if ((*buffers & PIPE_CLEAR_DEPTHSTENCIL) == PIPE_CLEAR_DEPTHSTENCIL) {
            if (htile_size &&
                si_can_fast_clear_depth(zstex, level, depth, *buffers) &&
                si_can_fast_clear_stencil(zstex, level, stencil, *buffers)) {
               /* Combined Z+S clear. */
               assert(num_clears < ARRAY_SIZE(info));
               si_init_buffer_clear(&info[num_clears++], &zstex->buffer.b.b, htile_offset,
                                    htile_size, si_get_htile_clear_value(zstex, depth));
               clear_types |= SI_CLEAR_TYPE_HTILE;
               *buffers &= ~PIPE_CLEAR_DEPTHSTENCIL;
               zstex->depth_cleared_level_mask_once |= BITFIELD_BIT(level);
               zstex->stencil_cleared_level_mask |= BITFIELD_BIT(level);
               update_db_depth_clear = true;
               update_db_stencil_clear = true;
            }
         } else {
            /* Z-only or S-only clear when both Z/S are present using a read-modify-write
             * compute shader.
             *
             * If we get both clears but only one of them can be fast-cleared, we use
             * the draw-based fast clear to do both at the same time.
             */
            const uint32_t htile_depth_writemask = 0xfffffc0f;
            const uint32_t htile_stencil_writemask = 0x000003f0;

            if (htile_size &&
                !(*buffers & PIPE_CLEAR_STENCIL) &&
                si_can_fast_clear_depth(zstex, level, depth, *buffers)) {
               /* Z-only clear with stencil left intact. */
               assert(num_clears < ARRAY_SIZE(info));
               si_init_buffer_clear_rmw(&info[num_clears++], &zstex->buffer.b.b, htile_offset,
                                        htile_size, si_get_htile_clear_value(zstex, depth),
                                        htile_depth_writemask);
               clear_types |= SI_CLEAR_TYPE_HTILE;
               *buffers &= ~PIPE_CLEAR_DEPTH;
               zstex->depth_cleared_level_mask_once |= BITFIELD_BIT(level);
               update_db_depth_clear = true;
            } else if (htile_size &&
                       !(*buffers & PIPE_CLEAR_DEPTH) &&
                       si_can_fast_clear_stencil(zstex, level, stencil, *buffers)) {
               /* Stencil-only clear with depth left intact. */
               assert(num_clears < ARRAY_SIZE(info));
               si_init_buffer_clear_rmw(&info[num_clears++], &zstex->buffer.b.b, htile_offset,
                                        htile_size, si_get_htile_clear_value(zstex, depth),
                                        htile_stencil_writemask);
               clear_types |= SI_CLEAR_TYPE_HTILE;
               *buffers &= ~PIPE_CLEAR_STENCIL;
               zstex->stencil_cleared_level_mask |= BITFIELD_BIT(level);
               update_db_stencil_clear = true;
            }
         }

         /* Update DB_DEPTH_CLEAR. */
         if (update_db_depth_clear &&
             zstex->depth_clear_value[level] != (float)depth) {
            zstex->depth_clear_value[level] = depth;
            sctx->framebuffer.dirty_zsbuf = true;
            si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
         }

         /* Update DB_STENCIL_CLEAR. */
         if (update_db_stencil_clear &&
             zstex->stencil_clear_value[level] != stencil) {
            zstex->stencil_clear_value[level] = stencil;
            sctx->framebuffer.dirty_zsbuf = true;
            si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
         }
      }
   }

   si_execute_clears(sctx, info, num_clears, clear_types);
}

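/* pipe_context::clear handler: try the metadata fast-clear path first and
 * fall back to a blitter-based clear for whatever is left.
 */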
static void si_clear(struct pipe_context *ctx, unsigned buffers,
                     const struct pipe_scissor_state *scissor_state,
                     const union pipe_color_union *color, double depth, unsigned stencil)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct pipe_framebuffer_state *fb = &sctx->framebuffer.state;
   struct pipe_surface *zsbuf = fb->zsbuf;
   struct si_texture *zstex = zsbuf ? (struct si_texture *)zsbuf->texture : NULL;
   bool needs_db_flush = false;

   /* Unset clear flags for non-existent buffers. */
   for (unsigned i = 0; i < 8; i++) {
      if (i >= fb->nr_cbufs || !fb->cbufs[i])
         buffers &= ~(PIPE_CLEAR_COLOR0 << i);
   }
   if (!zsbuf)
      buffers &= ~PIPE_CLEAR_DEPTHSTENCIL;
   else if (!util_format_has_stencil(util_format_description(zsbuf->format)))
      buffers &= ~PIPE_CLEAR_STENCIL;

   if (buffers & PIPE_CLEAR_DEPTH)
      zstex->depth_cleared_level_mask |= BITFIELD_BIT(zsbuf->u.tex.level);

   si_fast_clear(sctx, &buffers, color, depth, stencil);
   if (!buffers)
      return; /* all buffers have been cleared */

   if (buffers & PIPE_CLEAR_COLOR) {
      /* These buffers cannot use fast clear, make sure to disable expansion. */
      unsigned color_buffer_mask = (buffers & PIPE_CLEAR_COLOR) >> util_logbase2(PIPE_CLEAR_COLOR0);
      while (color_buffer_mask) {
         unsigned i = u_bit_scan(&color_buffer_mask);
         struct si_texture *tex = (struct si_texture *)fb->cbufs[i]->texture;
         if (tex->surface.fmask_size == 0)
            tex->dirty_level_mask &= ~(1 << fb->cbufs[i]->u.tex.level);
      }
   }

   if (zstex && zsbuf->u.tex.first_layer == 0 &&
       zsbuf->u.tex.last_layer == util_max_layer(&zstex->buffer.b.b, 0)) {
      unsigned level = zsbuf->u.tex.level;

      if (si_can_fast_clear_depth(zstex, level, depth, buffers)) {
         /* Need to disable EXPCLEAR temporarily if clearing
          * to a new value. */
         if (!(zstex->depth_cleared_level_mask_once & BITFIELD_BIT(level)) ||
             zstex->depth_clear_value[level] != depth) {
            sctx->db_depth_disable_expclear = true;
         }

         if (zstex->depth_clear_value[level] != (float)depth) {
            if ((zstex->depth_clear_value[level] != 0) != (depth != 0)) {
               /* ZRANGE_PRECISION register of a bound surface will change so we
                * must flush the DB caches. */
               needs_db_flush = true;
            }
            /* Update DB_DEPTH_CLEAR. */
            zstex->depth_clear_value[level] = depth;
            sctx->framebuffer.dirty_zsbuf = true;
            si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
         }
         sctx->db_depth_clear = true;
         si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
      }

      if (si_can_fast_clear_stencil(zstex, level, stencil, buffers)) {
         stencil &= 0xff;

         /* Need to disable EXPCLEAR temporarily if clearing
          * to a new value. */
         if (!(zstex->stencil_cleared_level_mask & BITFIELD_BIT(level)) ||
             zstex->stencil_clear_value[level] != stencil) {
            sctx->db_stencil_disable_expclear = true;
         }

         if (zstex->stencil_clear_value[level] != (uint8_t)stencil) {
            /* Update DB_STENCIL_CLEAR. */
            zstex->stencil_clear_value[level] = stencil;
            sctx->framebuffer.dirty_zsbuf = true;
            si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
         }
         sctx->db_stencil_clear = true;
         si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
      }

      if (needs_db_flush)
         sctx->flags |= SI_CONTEXT_FLUSH_AND_INV_DB;
   }

   if (unlikely(sctx->thread_trace_enabled)) {
      if (buffers & PIPE_CLEAR_COLOR)
         sctx->sqtt_next_event = EventCmdClearColorImage;
      else if (buffers & PIPE_CLEAR_DEPTHSTENCIL)
         sctx->sqtt_next_event = EventCmdClearDepthStencilImage;
   }

   si_blitter_begin(sctx, SI_CLEAR);
   util_blitter_clear(sctx->blitter, fb->width, fb->height, util_framebuffer_get_num_layers(fb),
                      buffers, color, depth, stencil, sctx->framebuffer.nr_samples > 1);
   si_blitter_end(sctx);

   if (sctx->db_depth_clear) {
      sctx->db_depth_clear = false;
      sctx->db_depth_disable_expclear = false;
      zstex->depth_cleared_level_mask_once |= BITFIELD_BIT(zsbuf->u.tex.level);
      si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
   }

   if (sctx->db_stencil_clear) {
      sctx->db_stencil_clear = false;
      sctx->db_stencil_disable_expclear = false;
      zstex->stencil_cleared_level_mask |= BITFIELD_BIT(zsbuf->u.tex.level);
      si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
   }
}

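/* pipe_context::clear_render_target handler: use a compute-based clear for
 * single-sample surfaces without DCC, otherwise go through the blitter.
 */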
static void si_clear_render_target(struct pipe_context *ctx, struct pipe_surface *dst,
                                   const union pipe_color_union *color, unsigned dstx,
                                   unsigned dsty, unsigned width, unsigned height,
                                   bool render_condition_enabled)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_texture *sdst = (struct si_texture *)dst->texture;

   if (dst->texture->nr_samples <= 1 && !vi_dcc_enabled(sdst, dst->u.tex.level)) {
      si_compute_clear_render_target(ctx, dst, color, dstx, dsty, width, height,
                                     render_condition_enabled);
      return;
   }

   si_blitter_begin(sctx,
                    SI_CLEAR_SURFACE | (render_condition_enabled ? 0 : SI_DISABLE_RENDER_COND));
   util_blitter_clear_render_target(sctx->blitter, dst, color, dstx, dsty, width, height);
   si_blitter_end(sctx);
}

static void si_clear_depth_stencil(struct pipe_context *ctx, struct pipe_surface *dst,
                                   unsigned clear_flags, double depth, unsigned stencil,
                                   unsigned dstx, unsigned dsty, unsigned width, unsigned height,
                                   bool render_condition_enabled)
{
   struct si_context *sctx = (struct si_context *)ctx;

   si_blitter_begin(sctx,
                    SI_CLEAR_SURFACE | (render_condition_enabled ? 0 : SI_DISABLE_RENDER_COND));
   util_blitter_clear_depth_stencil(sctx->blitter, dst, clear_flags, depth, stencil, dstx, dsty,
                                    width, height);
   si_blitter_end(sctx);
}

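/* pipe_context::clear_texture handler: unpack the clear value and forward it
 * to the depth/stencil or render-target clear path for the selected box.
 */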
static void si_clear_texture(struct pipe_context *pipe, struct pipe_resource *tex, unsigned level,
                             const struct pipe_box *box, const void *data)
{
   struct pipe_screen *screen = pipe->screen;
   struct si_texture *stex = (struct si_texture *)tex;
   struct pipe_surface tmpl = {{0}};
   struct pipe_surface *sf;

   tmpl.format = tex->format;
   tmpl.u.tex.first_layer = box->z;
   tmpl.u.tex.last_layer = box->z + box->depth - 1;
   tmpl.u.tex.level = level;
   sf = pipe->create_surface(pipe, tex, &tmpl);
   if (!sf)
      return;

   if (stex->is_depth) {
      unsigned clear;
      float depth;
      uint8_t stencil = 0;

      /* Depth is always present. */
      clear = PIPE_CLEAR_DEPTH;
      util_format_unpack_z_float(tex->format, &depth, data, 1);

      if (stex->surface.has_stencil) {
         clear |= PIPE_CLEAR_STENCIL;
         util_format_unpack_s_8uint(tex->format, &stencil, data, 1);
      }

      si_clear_depth_stencil(pipe, sf, clear, depth, stencil, box->x, box->y, box->width,
                             box->height, false);
   } else {
      union pipe_color_union color;

      util_format_unpack_rgba(tex->format, color.ui, data, 1);

      if (screen->is_format_supported(screen, tex->format, tex->target, 0, 0,
                                      PIPE_BIND_RENDER_TARGET)) {
         si_clear_render_target(pipe, sf, &color, box->x, box->y, box->width, box->height, false);
      } else {
         /* Software fallback - just for R9G9B9E5_FLOAT */
         util_clear_render_target(pipe, sf, &color, box->x, box->y, box->width, box->height);
      }
   }
   pipe_surface_reference(&sf, NULL);
}

void si_init_clear_functions(struct si_context *sctx)
{
   sctx->b.clear_render_target = si_clear_render_target;
   sctx->b.clear_texture = si_clear_texture;

   if (sctx->has_graphics) {
      sctx->b.clear = si_clear;
      sctx->b.clear_depth_stencil = si_clear_depth_stencil;
   }
}