Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/amd/vulkan/radv_meta_clear.c
7233 views
1
/*
2
* Copyright © 2015 Intel Corporation
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
* IN THE SOFTWARE.
22
*/
23
24
#include "nir/nir_builder.h"
25
#include "radv_debug.h"
26
#include "radv_meta.h"
27
#include "radv_private.h"
28
29
#include "util/format_rgb9e5.h"
30
#include "vk_format.h"
31
32
enum { DEPTH_CLEAR_SLOW, DEPTH_CLEAR_FAST_EXPCLEAR, DEPTH_CLEAR_FAST_NO_EXPCLEAR };
33
34
static void
35
build_color_shaders(struct nir_shader **out_vs, struct nir_shader **out_fs, uint32_t frag_output)
36
{
37
nir_builder vs_b =
38
nir_builder_init_simple_shader(MESA_SHADER_VERTEX, NULL, "meta_clear_color_vs");
39
nir_builder fs_b =
40
nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT, NULL, "meta_clear_color_fs");
41
42
const struct glsl_type *position_type = glsl_vec4_type();
43
const struct glsl_type *color_type = glsl_vec4_type();
44
45
nir_variable *vs_out_pos =
46
nir_variable_create(vs_b.shader, nir_var_shader_out, position_type, "gl_Position");
47
vs_out_pos->data.location = VARYING_SLOT_POS;
48
49
nir_ssa_def *in_color_load =
50
nir_load_push_constant(&fs_b, 4, 32, nir_imm_int(&fs_b, 0), .range = 16);
51
52
nir_variable *fs_out_color =
53
nir_variable_create(fs_b.shader, nir_var_shader_out, color_type, "f_color");
54
fs_out_color->data.location = FRAG_RESULT_DATA0 + frag_output;
55
56
nir_store_var(&fs_b, fs_out_color, in_color_load, 0xf);
57
58
nir_ssa_def *outvec = radv_meta_gen_rect_vertices(&vs_b);
59
nir_store_var(&vs_b, vs_out_pos, outvec, 0xf);
60
61
const struct glsl_type *layer_type = glsl_int_type();
62
nir_variable *vs_out_layer =
63
nir_variable_create(vs_b.shader, nir_var_shader_out, layer_type, "v_layer");
64
vs_out_layer->data.location = VARYING_SLOT_LAYER;
65
vs_out_layer->data.interpolation = INTERP_MODE_FLAT;
66
nir_ssa_def *inst_id = nir_load_instance_id(&vs_b);
67
nir_ssa_def *base_instance = nir_load_base_instance(&vs_b);
68
69
nir_ssa_def *layer_id = nir_iadd(&vs_b, inst_id, base_instance);
70
nir_store_var(&vs_b, vs_out_layer, layer_id, 0x1);
71
72
*out_vs = vs_b.shader;
73
*out_fs = fs_b.shader;
74
}
75
76
static VkResult
77
create_pipeline(struct radv_device *device, struct radv_render_pass *render_pass, uint32_t samples,
78
struct nir_shader *vs_nir, struct nir_shader *fs_nir,
79
const VkPipelineVertexInputStateCreateInfo *vi_state,
80
const VkPipelineDepthStencilStateCreateInfo *ds_state,
81
const VkPipelineColorBlendStateCreateInfo *cb_state, const VkPipelineLayout layout,
82
const struct radv_graphics_pipeline_create_info *extra,
83
const VkAllocationCallbacks *alloc, VkPipeline *pipeline)
84
{
85
VkDevice device_h = radv_device_to_handle(device);
86
VkResult result;
87
88
result = radv_graphics_pipeline_create(
89
device_h, radv_pipeline_cache_to_handle(&device->meta_state.cache),
90
&(VkGraphicsPipelineCreateInfo){
91
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
92
.stageCount = fs_nir ? 2 : 1,
93
.pStages =
94
(VkPipelineShaderStageCreateInfo[]){
95
{
96
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
97
.stage = VK_SHADER_STAGE_VERTEX_BIT,
98
.module = vk_shader_module_handle_from_nir(vs_nir),
99
.pName = "main",
100
},
101
{
102
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
103
.stage = VK_SHADER_STAGE_FRAGMENT_BIT,
104
.module = vk_shader_module_handle_from_nir(fs_nir),
105
.pName = "main",
106
},
107
},
108
.pVertexInputState = vi_state,
109
.pInputAssemblyState =
110
&(VkPipelineInputAssemblyStateCreateInfo){
111
.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
112
.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
113
.primitiveRestartEnable = false,
114
},
115
.pViewportState =
116
&(VkPipelineViewportStateCreateInfo){
117
.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
118
.viewportCount = 1,
119
.scissorCount = 1,
120
},
121
.pRasterizationState =
122
&(VkPipelineRasterizationStateCreateInfo){
123
.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
124
.rasterizerDiscardEnable = false,
125
.polygonMode = VK_POLYGON_MODE_FILL,
126
.cullMode = VK_CULL_MODE_NONE,
127
.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
128
.depthBiasEnable = false,
129
},
130
.pMultisampleState =
131
&(VkPipelineMultisampleStateCreateInfo){
132
.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
133
.rasterizationSamples = samples,
134
.sampleShadingEnable = false,
135
.pSampleMask = NULL,
136
.alphaToCoverageEnable = false,
137
.alphaToOneEnable = false,
138
},
139
.pDepthStencilState = ds_state,
140
.pColorBlendState = cb_state,
141
.pDynamicState =
142
&(VkPipelineDynamicStateCreateInfo){
143
/* The meta clear pipeline declares all state as dynamic.
144
* As a consequence, vkCmdBindPipeline writes no dynamic state
145
* to the cmd buffer. Therefore, at the end of the meta clear,
146
* we need only restore dynamic state was vkCmdSet.
147
*/
148
.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
149
.dynamicStateCount = 8,
150
.pDynamicStates =
151
(VkDynamicState[]){
152
/* Everything except stencil write mask */
153
VK_DYNAMIC_STATE_VIEWPORT,
154
VK_DYNAMIC_STATE_SCISSOR,
155
VK_DYNAMIC_STATE_LINE_WIDTH,
156
VK_DYNAMIC_STATE_DEPTH_BIAS,
157
VK_DYNAMIC_STATE_BLEND_CONSTANTS,
158
VK_DYNAMIC_STATE_DEPTH_BOUNDS,
159
VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
160
VK_DYNAMIC_STATE_STENCIL_REFERENCE,
161
},
162
},
163
.layout = layout,
164
.flags = 0,
165
.renderPass = radv_render_pass_to_handle(render_pass),
166
.subpass = 0,
167
},
168
extra, alloc, pipeline);
169
170
ralloc_free(vs_nir);
171
ralloc_free(fs_nir);
172
173
return result;
174
}
175
176
static VkResult
177
create_color_renderpass(struct radv_device *device, VkFormat vk_format, uint32_t samples,
178
VkRenderPass *pass)
179
{
180
mtx_lock(&device->meta_state.mtx);
181
if (*pass) {
182
mtx_unlock(&device->meta_state.mtx);
183
return VK_SUCCESS;
184
}
185
186
VkResult result = radv_CreateRenderPass2(
187
radv_device_to_handle(device),
188
&(VkRenderPassCreateInfo2){
189
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2,
190
.attachmentCount = 1,
191
.pAttachments =
192
&(VkAttachmentDescription2){
193
.sType = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2,
194
.format = vk_format,
195
.samples = samples,
196
.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
197
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
198
.initialLayout = VK_IMAGE_LAYOUT_GENERAL,
199
.finalLayout = VK_IMAGE_LAYOUT_GENERAL,
200
},
201
.subpassCount = 1,
202
.pSubpasses =
203
&(VkSubpassDescription2){
204
.sType = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2,
205
.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
206
.inputAttachmentCount = 0,
207
.colorAttachmentCount = 1,
208
.pColorAttachments =
209
&(VkAttachmentReference2){
210
.sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2,
211
.attachment = 0,
212
.layout = VK_IMAGE_LAYOUT_GENERAL,
213
},
214
.pResolveAttachments = NULL,
215
.pDepthStencilAttachment =
216
&(VkAttachmentReference2){
217
.sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2,
218
.attachment = VK_ATTACHMENT_UNUSED,
219
.layout = VK_IMAGE_LAYOUT_GENERAL,
220
},
221
.preserveAttachmentCount = 0,
222
.pPreserveAttachments = NULL,
223
},
224
.dependencyCount = 2,
225
.pDependencies =
226
(VkSubpassDependency2[]){{.sType = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2,
227
.srcSubpass = VK_SUBPASS_EXTERNAL,
228
.dstSubpass = 0,
229
.srcStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
230
.dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
231
.srcAccessMask = 0,
232
.dstAccessMask = 0,
233
.dependencyFlags = 0},
234
{.sType = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2,
235
.srcSubpass = 0,
236
.dstSubpass = VK_SUBPASS_EXTERNAL,
237
.srcStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
238
.dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
239
.srcAccessMask = 0,
240
.dstAccessMask = 0,
241
.dependencyFlags = 0}},
242
},
243
&device->meta_state.alloc, pass);
244
mtx_unlock(&device->meta_state.mtx);
245
return result;
246
}
247
248
static VkResult
249
create_color_pipeline(struct radv_device *device, uint32_t samples, uint32_t frag_output,
250
VkPipeline *pipeline, VkRenderPass pass)
251
{
252
struct nir_shader *vs_nir;
253
struct nir_shader *fs_nir;
254
VkResult result;
255
256
mtx_lock(&device->meta_state.mtx);
257
if (*pipeline) {
258
mtx_unlock(&device->meta_state.mtx);
259
return VK_SUCCESS;
260
}
261
262
build_color_shaders(&vs_nir, &fs_nir, frag_output);
263
264
const VkPipelineVertexInputStateCreateInfo vi_state = {
265
.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
266
.vertexBindingDescriptionCount = 0,
267
.vertexAttributeDescriptionCount = 0,
268
};
269
270
const VkPipelineDepthStencilStateCreateInfo ds_state = {
271
.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
272
.depthTestEnable = false,
273
.depthWriteEnable = false,
274
.depthBoundsTestEnable = false,
275
.stencilTestEnable = false,
276
};
277
278
VkPipelineColorBlendAttachmentState blend_attachment_state[MAX_RTS] = {0};
279
blend_attachment_state[frag_output] = (VkPipelineColorBlendAttachmentState){
280
.blendEnable = false,
281
.colorWriteMask = VK_COLOR_COMPONENT_A_BIT | VK_COLOR_COMPONENT_R_BIT |
282
VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT,
283
};
284
285
const VkPipelineColorBlendStateCreateInfo cb_state = {
286
.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
287
.logicOpEnable = false,
288
.attachmentCount = MAX_RTS,
289
.pAttachments = blend_attachment_state};
290
291
struct radv_graphics_pipeline_create_info extra = {
292
.use_rectlist = true,
293
};
294
result =
295
create_pipeline(device, radv_render_pass_from_handle(pass), samples, vs_nir, fs_nir,
296
&vi_state, &ds_state, &cb_state, device->meta_state.clear_color_p_layout,
297
&extra, &device->meta_state.alloc, pipeline);
298
299
mtx_unlock(&device->meta_state.mtx);
300
return result;
301
}
302
303
static void
304
finish_meta_clear_htile_mask_state(struct radv_device *device)
305
{
306
struct radv_meta_state *state = &device->meta_state;
307
308
radv_DestroyPipeline(radv_device_to_handle(device), state->clear_htile_mask_pipeline,
309
&state->alloc);
310
radv_DestroyPipelineLayout(radv_device_to_handle(device), state->clear_htile_mask_p_layout,
311
&state->alloc);
312
radv_DestroyDescriptorSetLayout(radv_device_to_handle(device), state->clear_htile_mask_ds_layout,
313
&state->alloc);
314
}
315
316
void
317
radv_device_finish_meta_clear_state(struct radv_device *device)
318
{
319
struct radv_meta_state *state = &device->meta_state;
320
321
for (uint32_t i = 0; i < ARRAY_SIZE(state->clear); ++i) {
322
for (uint32_t j = 0; j < ARRAY_SIZE(state->clear[i].color_pipelines); ++j) {
323
radv_DestroyPipeline(radv_device_to_handle(device), state->clear[i].color_pipelines[j],
324
&state->alloc);
325
radv_DestroyRenderPass(radv_device_to_handle(device), state->clear[i].render_pass[j],
326
&state->alloc);
327
}
328
329
for (uint32_t j = 0; j < NUM_DEPTH_CLEAR_PIPELINES; j++) {
330
radv_DestroyPipeline(radv_device_to_handle(device), state->clear[i].depth_only_pipeline[j],
331
&state->alloc);
332
radv_DestroyPipeline(radv_device_to_handle(device),
333
state->clear[i].stencil_only_pipeline[j], &state->alloc);
334
radv_DestroyPipeline(radv_device_to_handle(device),
335
state->clear[i].depthstencil_pipeline[j], &state->alloc);
336
337
radv_DestroyPipeline(radv_device_to_handle(device),
338
state->clear[i].depth_only_unrestricted_pipeline[j], &state->alloc);
339
radv_DestroyPipeline(radv_device_to_handle(device),
340
state->clear[i].stencil_only_unrestricted_pipeline[j], &state->alloc);
341
radv_DestroyPipeline(radv_device_to_handle(device),
342
state->clear[i].depthstencil_unrestricted_pipeline[j], &state->alloc);
343
}
344
radv_DestroyRenderPass(radv_device_to_handle(device), state->clear[i].depthstencil_rp,
345
&state->alloc);
346
}
347
radv_DestroyPipelineLayout(radv_device_to_handle(device), state->clear_color_p_layout,
348
&state->alloc);
349
radv_DestroyPipelineLayout(radv_device_to_handle(device), state->clear_depth_p_layout,
350
&state->alloc);
351
radv_DestroyPipelineLayout(radv_device_to_handle(device),
352
state->clear_depth_unrestricted_p_layout, &state->alloc);
353
354
finish_meta_clear_htile_mask_state(device);
355
}
356
357
static void
358
emit_color_clear(struct radv_cmd_buffer *cmd_buffer, const VkClearAttachment *clear_att,
359
const VkClearRect *clear_rect, uint32_t view_mask)
360
{
361
struct radv_device *device = cmd_buffer->device;
362
const struct radv_subpass *subpass = cmd_buffer->state.subpass;
363
const uint32_t subpass_att = clear_att->colorAttachment;
364
const uint32_t pass_att = subpass->color_attachments[subpass_att].attachment;
365
const struct radv_image_view *iview =
366
cmd_buffer->state.attachments ? cmd_buffer->state.attachments[pass_att].iview : NULL;
367
uint32_t samples, samples_log2;
368
VkFormat format;
369
unsigned fs_key;
370
VkClearColorValue clear_value = clear_att->clearValue.color;
371
VkCommandBuffer cmd_buffer_h = radv_cmd_buffer_to_handle(cmd_buffer);
372
VkPipeline pipeline;
373
374
/* When a framebuffer is bound to the current command buffer, get the
375
* number of samples from it. Otherwise, get the number of samples from
376
* the render pass because it's likely a secondary command buffer.
377
*/
378
if (iview) {
379
samples = iview->image->info.samples;
380
format = iview->vk_format;
381
} else {
382
samples = cmd_buffer->state.pass->attachments[pass_att].samples;
383
format = cmd_buffer->state.pass->attachments[pass_att].format;
384
}
385
386
samples_log2 = ffs(samples) - 1;
387
fs_key = radv_format_meta_fs_key(device, format);
388
389
if (fs_key == -1) {
390
radv_finishme("color clears incomplete");
391
return;
392
}
393
394
if (device->meta_state.clear[samples_log2].render_pass[fs_key] == VK_NULL_HANDLE) {
395
VkResult ret =
396
create_color_renderpass(device, radv_fs_key_format_exemplars[fs_key], samples,
397
&device->meta_state.clear[samples_log2].render_pass[fs_key]);
398
if (ret != VK_SUCCESS) {
399
cmd_buffer->record_result = ret;
400
return;
401
}
402
}
403
404
if (device->meta_state.clear[samples_log2].color_pipelines[fs_key] == VK_NULL_HANDLE) {
405
VkResult ret = create_color_pipeline(
406
device, samples, 0, &device->meta_state.clear[samples_log2].color_pipelines[fs_key],
407
device->meta_state.clear[samples_log2].render_pass[fs_key]);
408
if (ret != VK_SUCCESS) {
409
cmd_buffer->record_result = ret;
410
return;
411
}
412
}
413
414
pipeline = device->meta_state.clear[samples_log2].color_pipelines[fs_key];
415
if (!pipeline) {
416
radv_finishme("color clears incomplete");
417
return;
418
}
419
assert(samples_log2 < ARRAY_SIZE(device->meta_state.clear));
420
assert(pipeline);
421
assert(clear_att->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
422
assert(clear_att->colorAttachment < subpass->color_count);
423
424
radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
425
device->meta_state.clear_color_p_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 0,
426
16, &clear_value);
427
428
struct radv_subpass clear_subpass = {
429
.color_count = 1,
430
.color_attachments =
431
(struct radv_subpass_attachment[]){subpass->color_attachments[clear_att->colorAttachment]},
432
.depth_stencil_attachment = NULL,
433
};
434
435
radv_cmd_buffer_set_subpass(cmd_buffer, &clear_subpass);
436
437
radv_CmdBindPipeline(cmd_buffer_h, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
438
439
radv_CmdSetViewport(radv_cmd_buffer_to_handle(cmd_buffer), 0, 1,
440
&(VkViewport){.x = clear_rect->rect.offset.x,
441
.y = clear_rect->rect.offset.y,
442
.width = clear_rect->rect.extent.width,
443
.height = clear_rect->rect.extent.height,
444
.minDepth = 0.0f,
445
.maxDepth = 1.0f});
446
447
radv_CmdSetScissor(radv_cmd_buffer_to_handle(cmd_buffer), 0, 1, &clear_rect->rect);
448
449
if (view_mask) {
450
u_foreach_bit(i, view_mask) radv_CmdDraw(cmd_buffer_h, 3, 1, 0, i);
451
} else {
452
radv_CmdDraw(cmd_buffer_h, 3, clear_rect->layerCount, 0, clear_rect->baseArrayLayer);
453
}
454
455
radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
456
}
457
458
static void
459
build_depthstencil_shader(struct nir_shader **out_vs, struct nir_shader **out_fs, bool unrestricted)
460
{
461
nir_builder vs_b = nir_builder_init_simple_shader(
462
MESA_SHADER_VERTEX, NULL,
463
unrestricted ? "meta_clear_depthstencil_unrestricted_vs" : "meta_clear_depthstencil_vs");
464
nir_builder fs_b = nir_builder_init_simple_shader(
465
MESA_SHADER_FRAGMENT, NULL,
466
unrestricted ? "meta_clear_depthstencil_unrestricted_fs" : "meta_clear_depthstencil_fs");
467
468
const struct glsl_type *position_out_type = glsl_vec4_type();
469
470
nir_variable *vs_out_pos =
471
nir_variable_create(vs_b.shader, nir_var_shader_out, position_out_type, "gl_Position");
472
vs_out_pos->data.location = VARYING_SLOT_POS;
473
474
nir_ssa_def *z;
475
if (unrestricted) {
476
nir_ssa_def *in_color_load =
477
nir_load_push_constant(&fs_b, 1, 32, nir_imm_int(&fs_b, 0), .range = 4);
478
479
nir_variable *fs_out_depth =
480
nir_variable_create(fs_b.shader, nir_var_shader_out, glsl_int_type(), "f_depth");
481
fs_out_depth->data.location = FRAG_RESULT_DEPTH;
482
nir_store_var(&fs_b, fs_out_depth, in_color_load, 0x1);
483
484
z = nir_imm_float(&vs_b, 0.0);
485
} else {
486
z = nir_load_push_constant(&vs_b, 1, 32, nir_imm_int(&vs_b, 0), .range = 4);
487
}
488
489
nir_ssa_def *outvec = radv_meta_gen_rect_vertices_comp2(&vs_b, z);
490
nir_store_var(&vs_b, vs_out_pos, outvec, 0xf);
491
492
const struct glsl_type *layer_type = glsl_int_type();
493
nir_variable *vs_out_layer =
494
nir_variable_create(vs_b.shader, nir_var_shader_out, layer_type, "v_layer");
495
vs_out_layer->data.location = VARYING_SLOT_LAYER;
496
vs_out_layer->data.interpolation = INTERP_MODE_FLAT;
497
nir_ssa_def *inst_id = nir_load_instance_id(&vs_b);
498
nir_ssa_def *base_instance = nir_load_base_instance(&vs_b);
499
500
nir_ssa_def *layer_id = nir_iadd(&vs_b, inst_id, base_instance);
501
nir_store_var(&vs_b, vs_out_layer, layer_id, 0x1);
502
503
*out_vs = vs_b.shader;
504
*out_fs = fs_b.shader;
505
}
506
507
static VkResult
508
create_depthstencil_renderpass(struct radv_device *device, uint32_t samples,
509
VkRenderPass *render_pass)
510
{
511
mtx_lock(&device->meta_state.mtx);
512
if (*render_pass) {
513
mtx_unlock(&device->meta_state.mtx);
514
return VK_SUCCESS;
515
}
516
517
VkResult result = radv_CreateRenderPass2(
518
radv_device_to_handle(device),
519
&(VkRenderPassCreateInfo2){
520
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2,
521
.attachmentCount = 1,
522
.pAttachments =
523
&(VkAttachmentDescription2){
524
.sType = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2,
525
.format = VK_FORMAT_D32_SFLOAT_S8_UINT,
526
.samples = samples,
527
.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
528
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
529
.initialLayout = VK_IMAGE_LAYOUT_GENERAL,
530
.finalLayout = VK_IMAGE_LAYOUT_GENERAL,
531
},
532
.subpassCount = 1,
533
.pSubpasses =
534
&(VkSubpassDescription2){
535
.sType = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2,
536
.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
537
.inputAttachmentCount = 0,
538
.colorAttachmentCount = 0,
539
.pColorAttachments = NULL,
540
.pResolveAttachments = NULL,
541
.pDepthStencilAttachment =
542
&(VkAttachmentReference2){
543
.sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2,
544
.attachment = 0,
545
.layout = VK_IMAGE_LAYOUT_GENERAL,
546
},
547
.preserveAttachmentCount = 0,
548
.pPreserveAttachments = NULL,
549
},
550
.dependencyCount = 2,
551
.pDependencies =
552
(VkSubpassDependency2[]){{.sType = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2,
553
.srcSubpass = VK_SUBPASS_EXTERNAL,
554
.dstSubpass = 0,
555
.srcStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
556
.dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
557
.srcAccessMask = 0,
558
.dstAccessMask = 0,
559
.dependencyFlags = 0},
560
{.sType = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2,
561
.srcSubpass = 0,
562
.dstSubpass = VK_SUBPASS_EXTERNAL,
563
.srcStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
564
.dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
565
.srcAccessMask = 0,
566
.dstAccessMask = 0,
567
.dependencyFlags = 0}}},
568
&device->meta_state.alloc, render_pass);
569
mtx_unlock(&device->meta_state.mtx);
570
return result;
571
}
572
573
static VkResult
574
create_depthstencil_pipeline(struct radv_device *device, VkImageAspectFlags aspects,
575
uint32_t samples, int index, bool unrestricted, VkPipeline *pipeline,
576
VkRenderPass render_pass)
577
{
578
struct nir_shader *vs_nir, *fs_nir;
579
VkResult result;
580
581
mtx_lock(&device->meta_state.mtx);
582
if (*pipeline) {
583
mtx_unlock(&device->meta_state.mtx);
584
return VK_SUCCESS;
585
}
586
587
build_depthstencil_shader(&vs_nir, &fs_nir, unrestricted);
588
589
const VkPipelineVertexInputStateCreateInfo vi_state = {
590
.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
591
.vertexBindingDescriptionCount = 0,
592
.vertexAttributeDescriptionCount = 0,
593
};
594
595
const VkPipelineDepthStencilStateCreateInfo ds_state = {
596
.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
597
.depthTestEnable = !!(aspects & VK_IMAGE_ASPECT_DEPTH_BIT),
598
.depthCompareOp = VK_COMPARE_OP_ALWAYS,
599
.depthWriteEnable = !!(aspects & VK_IMAGE_ASPECT_DEPTH_BIT),
600
.depthBoundsTestEnable = false,
601
.stencilTestEnable = !!(aspects & VK_IMAGE_ASPECT_STENCIL_BIT),
602
.front =
603
{
604
.passOp = VK_STENCIL_OP_REPLACE,
605
.compareOp = VK_COMPARE_OP_ALWAYS,
606
.writeMask = UINT32_MAX,
607
.reference = 0, /* dynamic */
608
},
609
.back = {0 /* dont care */},
610
};
611
612
const VkPipelineColorBlendStateCreateInfo cb_state = {
613
.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
614
.logicOpEnable = false,
615
.attachmentCount = 0,
616
.pAttachments = NULL,
617
};
618
619
struct radv_graphics_pipeline_create_info extra = {
620
.use_rectlist = true,
621
};
622
623
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
624
extra.db_depth_clear = index == DEPTH_CLEAR_SLOW ? false : true;
625
extra.db_depth_disable_expclear = index == DEPTH_CLEAR_FAST_NO_EXPCLEAR ? true : false;
626
}
627
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
628
extra.db_stencil_clear = index == DEPTH_CLEAR_SLOW ? false : true;
629
extra.db_stencil_disable_expclear = index == DEPTH_CLEAR_FAST_NO_EXPCLEAR ? true : false;
630
}
631
result =
632
create_pipeline(device, radv_render_pass_from_handle(render_pass), samples, vs_nir, fs_nir,
633
&vi_state, &ds_state, &cb_state, device->meta_state.clear_depth_p_layout,
634
&extra, &device->meta_state.alloc, pipeline);
635
636
mtx_unlock(&device->meta_state.mtx);
637
return result;
638
}
639
640
static bool
641
depth_view_can_fast_clear(struct radv_cmd_buffer *cmd_buffer, const struct radv_image_view *iview,
642
VkImageAspectFlags aspects, VkImageLayout layout, bool in_render_loop,
643
const VkClearRect *clear_rect, VkClearDepthStencilValue clear_value)
644
{
645
if (!iview)
646
return false;
647
648
uint32_t queue_mask = radv_image_queue_family_mask(iview->image, cmd_buffer->queue_family_index,
649
cmd_buffer->queue_family_index);
650
if (clear_rect->rect.offset.x || clear_rect->rect.offset.y ||
651
clear_rect->rect.extent.width != iview->extent.width ||
652
clear_rect->rect.extent.height != iview->extent.height)
653
return false;
654
if (radv_image_is_tc_compat_htile(iview->image) &&
655
(((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) && clear_value.depth != 0.0 &&
656
clear_value.depth != 1.0) ||
657
((aspects & VK_IMAGE_ASPECT_STENCIL_BIT) && clear_value.stencil != 0)))
658
return false;
659
if (radv_htile_enabled(iview->image, iview->base_mip) && iview->base_mip == 0 &&
660
iview->base_layer == 0 && iview->layer_count == iview->image->info.array_size &&
661
radv_layout_is_htile_compressed(cmd_buffer->device, iview->image, layout, in_render_loop,
662
queue_mask) &&
663
radv_image_extent_compare(iview->image, &iview->extent))
664
return true;
665
return false;
666
}
667
668
static VkPipeline
669
pick_depthstencil_pipeline(struct radv_cmd_buffer *cmd_buffer, struct radv_meta_state *meta_state,
670
const struct radv_image_view *iview, int samples_log2,
671
VkImageAspectFlags aspects, VkImageLayout layout, bool in_render_loop,
672
const VkClearRect *clear_rect, VkClearDepthStencilValue clear_value)
673
{
674
bool fast = depth_view_can_fast_clear(cmd_buffer, iview, aspects, layout, in_render_loop,
675
clear_rect, clear_value);
676
bool unrestricted = cmd_buffer->device->vk.enabled_extensions.EXT_depth_range_unrestricted;
677
int index = DEPTH_CLEAR_SLOW;
678
VkPipeline *pipeline;
679
680
if (fast) {
681
/* we don't know the previous clear values, so we always have
682
* the NO_EXPCLEAR path */
683
index = DEPTH_CLEAR_FAST_NO_EXPCLEAR;
684
}
685
686
switch (aspects) {
687
case VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT:
688
pipeline = unrestricted
689
? &meta_state->clear[samples_log2].depthstencil_unrestricted_pipeline[index]
690
: &meta_state->clear[samples_log2].depthstencil_pipeline[index];
691
break;
692
case VK_IMAGE_ASPECT_DEPTH_BIT:
693
pipeline = unrestricted
694
? &meta_state->clear[samples_log2].depth_only_unrestricted_pipeline[index]
695
: &meta_state->clear[samples_log2].depth_only_pipeline[index];
696
break;
697
case VK_IMAGE_ASPECT_STENCIL_BIT:
698
pipeline = unrestricted
699
? &meta_state->clear[samples_log2].stencil_only_unrestricted_pipeline[index]
700
: &meta_state->clear[samples_log2].stencil_only_pipeline[index];
701
break;
702
default:
703
unreachable("expected depth or stencil aspect");
704
}
705
706
if (cmd_buffer->device->meta_state.clear[samples_log2].depthstencil_rp == VK_NULL_HANDLE) {
707
VkResult ret = create_depthstencil_renderpass(
708
cmd_buffer->device, 1u << samples_log2,
709
&cmd_buffer->device->meta_state.clear[samples_log2].depthstencil_rp);
710
if (ret != VK_SUCCESS) {
711
cmd_buffer->record_result = ret;
712
return VK_NULL_HANDLE;
713
}
714
}
715
716
if (*pipeline == VK_NULL_HANDLE) {
717
VkResult ret = create_depthstencil_pipeline(
718
cmd_buffer->device, aspects, 1u << samples_log2, index, unrestricted, pipeline,
719
cmd_buffer->device->meta_state.clear[samples_log2].depthstencil_rp);
720
if (ret != VK_SUCCESS) {
721
cmd_buffer->record_result = ret;
722
return VK_NULL_HANDLE;
723
}
724
}
725
return *pipeline;
726
}
727
728
static void
729
emit_depthstencil_clear(struct radv_cmd_buffer *cmd_buffer, const VkClearAttachment *clear_att,
730
const VkClearRect *clear_rect, struct radv_subpass_attachment *ds_att,
731
uint32_t view_mask)
732
{
733
struct radv_device *device = cmd_buffer->device;
734
struct radv_meta_state *meta_state = &device->meta_state;
735
const struct radv_subpass *subpass = cmd_buffer->state.subpass;
736
const uint32_t pass_att = ds_att->attachment;
737
VkClearDepthStencilValue clear_value = clear_att->clearValue.depthStencil;
738
VkImageAspectFlags aspects = clear_att->aspectMask;
739
const struct radv_image_view *iview =
740
cmd_buffer->state.attachments ? cmd_buffer->state.attachments[pass_att].iview : NULL;
741
uint32_t samples, samples_log2;
742
VkCommandBuffer cmd_buffer_h = radv_cmd_buffer_to_handle(cmd_buffer);
743
744
/* When a framebuffer is bound to the current command buffer, get the
745
* number of samples from it. Otherwise, get the number of samples from
746
* the render pass because it's likely a secondary command buffer.
747
*/
748
if (iview) {
749
samples = iview->image->info.samples;
750
} else {
751
samples = cmd_buffer->state.pass->attachments[pass_att].samples;
752
}
753
754
samples_log2 = ffs(samples) - 1;
755
756
assert(pass_att != VK_ATTACHMENT_UNUSED);
757
758
if (!(aspects & VK_IMAGE_ASPECT_DEPTH_BIT))
759
clear_value.depth = 1.0f;
760
761
if (cmd_buffer->device->vk.enabled_extensions.EXT_depth_range_unrestricted) {
762
radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
763
device->meta_state.clear_depth_unrestricted_p_layout,
764
VK_SHADER_STAGE_FRAGMENT_BIT, 0, 4, &clear_value.depth);
765
} else {
766
radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
767
device->meta_state.clear_depth_p_layout, VK_SHADER_STAGE_VERTEX_BIT, 0,
768
4, &clear_value.depth);
769
}
770
771
uint32_t prev_reference = cmd_buffer->state.dynamic.stencil_reference.front;
772
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
773
radv_CmdSetStencilReference(cmd_buffer_h, VK_STENCIL_FACE_FRONT_BIT, clear_value.stencil);
774
}
775
776
VkPipeline pipeline =
777
pick_depthstencil_pipeline(cmd_buffer, meta_state, iview, samples_log2, aspects,
778
ds_att->layout, ds_att->in_render_loop, clear_rect, clear_value);
779
if (!pipeline)
780
return;
781
782
struct radv_subpass clear_subpass = {
783
.color_count = 0,
784
.color_attachments = NULL,
785
.depth_stencil_attachment = ds_att,
786
};
787
788
radv_cmd_buffer_set_subpass(cmd_buffer, &clear_subpass);
789
790
radv_CmdBindPipeline(cmd_buffer_h, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
791
792
if (depth_view_can_fast_clear(cmd_buffer, iview, aspects, ds_att->layout, ds_att->in_render_loop,
793
clear_rect, clear_value))
794
radv_update_ds_clear_metadata(cmd_buffer, iview, clear_value, aspects);
795
796
radv_CmdSetViewport(radv_cmd_buffer_to_handle(cmd_buffer), 0, 1,
797
&(VkViewport){.x = clear_rect->rect.offset.x,
798
.y = clear_rect->rect.offset.y,
799
.width = clear_rect->rect.extent.width,
800
.height = clear_rect->rect.extent.height,
801
.minDepth = 0.0f,
802
.maxDepth = 1.0f});
803
804
radv_CmdSetScissor(radv_cmd_buffer_to_handle(cmd_buffer), 0, 1, &clear_rect->rect);
805
806
if (view_mask) {
807
u_foreach_bit(i, view_mask) radv_CmdDraw(cmd_buffer_h, 3, 1, 0, i);
808
} else {
809
radv_CmdDraw(cmd_buffer_h, 3, clear_rect->layerCount, 0, clear_rect->baseArrayLayer);
810
}
811
812
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
813
radv_CmdSetStencilReference(cmd_buffer_h, VK_STENCIL_FACE_FRONT_BIT, prev_reference);
814
}
815
816
radv_cmd_buffer_set_subpass(cmd_buffer, subpass);
817
}
818
819
static uint32_t
820
clear_htile_mask(struct radv_cmd_buffer *cmd_buffer, const struct radv_image *image,
821
struct radeon_winsys_bo *bo, uint64_t offset, uint64_t size, uint32_t htile_value,
822
uint32_t htile_mask)
823
{
824
struct radv_device *device = cmd_buffer->device;
825
struct radv_meta_state *state = &device->meta_state;
826
uint64_t block_count = round_up_u64(size, 1024);
827
struct radv_meta_saved_state saved_state;
828
829
radv_meta_save(
830
&saved_state, cmd_buffer,
831
RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_CONSTANTS | RADV_META_SAVE_DESCRIPTORS);
832
833
struct radv_buffer dst_buffer = {.bo = bo, .offset = offset, .size = size};
834
835
radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
836
state->clear_htile_mask_pipeline);
837
838
radv_meta_push_descriptor_set(
839
cmd_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, state->clear_htile_mask_p_layout, 0, /* set */
840
1, /* descriptorWriteCount */
841
(VkWriteDescriptorSet[]){
842
{.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
843
.dstBinding = 0,
844
.dstArrayElement = 0,
845
.descriptorCount = 1,
846
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
847
.pBufferInfo = &(VkDescriptorBufferInfo){.buffer = radv_buffer_to_handle(&dst_buffer),
848
.offset = 0,
849
.range = size}}});
850
851
const unsigned constants[2] = {
852
htile_value & htile_mask,
853
~htile_mask,
854
};
855
856
radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer), state->clear_htile_mask_p_layout,
857
VK_SHADER_STAGE_COMPUTE_BIT, 0, 8, constants);
858
859
radv_CmdDispatch(radv_cmd_buffer_to_handle(cmd_buffer), block_count, 1, 1);
860
861
radv_meta_restore(&saved_state, cmd_buffer);
862
863
return RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_INV_VCACHE |
864
radv_src_access_flush(cmd_buffer, VK_ACCESS_SHADER_WRITE_BIT, image);
865
}
866
867
static uint32_t
868
radv_get_htile_fast_clear_value(const struct radv_device *device, const struct radv_image *image,
869
VkClearDepthStencilValue value)
870
{
871
uint32_t max_zval = 0x3fff; /* maximum 14-bit value. */
872
uint32_t zmask = 0, smem = 0;
873
uint32_t htile_value;
874
uint32_t zmin, zmax;
875
876
/* Convert the depth value to 14-bit zmin/zmax values. */
877
zmin = ((value.depth * max_zval) + 0.5f);
878
zmax = zmin;
879
880
if (radv_image_tile_stencil_disabled(device, image)) {
881
/* Z only (no stencil):
882
*
883
* |31 18|17 4|3 0|
884
* +---------+---------+-------+
885
* | Max Z | Min Z | ZMask |
886
*/
887
htile_value = (((zmax & 0x3fff) << 18) |
888
((zmin & 0x3fff) << 4) |
889
((zmask & 0xf) << 0));
890
} else {
891
892
/* Z and stencil:
893
*
894
* |31 12|11 10|9 8|7 6|5 4|3 0|
895
* +-----------+-----+------+-----+-----+-------+
896
* | Z Range | | SMem | SR1 | SR0 | ZMask |
897
*
898
* Z, stencil, 4 bit VRS encoding:
899
* |31 12| 11 10 |9 8|7 6 |5 4|3 0|
900
* +-----------+------------+------+------------+-----+-------+
901
* | Z Range | VRS Y-rate | SMem | VRS X-rate | SR0 | ZMask |
902
*/
903
uint32_t delta = 0;
904
uint32_t zrange = ((zmax << 6) | delta);
905
uint32_t sresults = 0xf; /* SR0/SR1 both as 0x3. */
906
907
if (radv_image_has_vrs_htile(device, image))
908
sresults = 0x3;
909
910
htile_value = (((zrange & 0xfffff) << 12) |
911
((smem & 0x3) << 8) |
912
((sresults & 0xf) << 4) |
913
((zmask & 0xf) << 0));
914
}
915
916
return htile_value;
917
}
918
919
static uint32_t
920
radv_get_htile_mask(const struct radv_device *device, const struct radv_image *image,
921
VkImageAspectFlags aspects)
922
{
923
uint32_t mask = 0;
924
925
if (radv_image_tile_stencil_disabled(device, image)) {
926
/* All the HTILE buffer is used when there is no stencil. */
927
mask = UINT32_MAX;
928
} else {
929
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
930
mask |= 0xfffffc0f;
931
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
932
mask |= 0x000003f0;
933
}
934
935
return mask;
936
}
937
938
static bool
939
radv_is_fast_clear_depth_allowed(VkClearDepthStencilValue value)
940
{
941
return value.depth == 1.0f || value.depth == 0.0f;
942
}
943
944
static bool
945
radv_is_fast_clear_stencil_allowed(VkClearDepthStencilValue value)
946
{
947
return value.stencil == 0;
948
}
949
950
static bool
951
radv_can_fast_clear_depth(struct radv_cmd_buffer *cmd_buffer, const struct radv_image_view *iview,
952
VkImageLayout image_layout, bool in_render_loop,
953
VkImageAspectFlags aspects, const VkClearRect *clear_rect,
954
const VkClearDepthStencilValue clear_value, uint32_t view_mask)
955
{
956
if (!iview || !iview->support_fast_clear)
957
return false;
958
959
if (!radv_layout_is_htile_compressed(
960
cmd_buffer->device, iview->image, image_layout, in_render_loop,
961
radv_image_queue_family_mask(iview->image, cmd_buffer->queue_family_index,
962
cmd_buffer->queue_family_index)))
963
return false;
964
965
if (clear_rect->rect.offset.x || clear_rect->rect.offset.y ||
966
clear_rect->rect.extent.width != iview->image->info.width ||
967
clear_rect->rect.extent.height != iview->image->info.height)
968
return false;
969
970
if (view_mask && (iview->image->info.array_size >= 32 ||
971
(1u << iview->image->info.array_size) - 1u != view_mask))
972
return false;
973
if (!view_mask && clear_rect->baseArrayLayer != 0)
974
return false;
975
if (!view_mask && clear_rect->layerCount != iview->image->info.array_size)
976
return false;
977
978
if (cmd_buffer->device->vk.enabled_extensions.EXT_depth_range_unrestricted &&
979
(aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
980
(clear_value.depth < 0.0 || clear_value.depth > 1.0))
981
return false;
982
983
if (radv_image_is_tc_compat_htile(iview->image) &&
984
(((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) && !radv_is_fast_clear_depth_allowed(clear_value)) ||
985
((aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
986
!radv_is_fast_clear_stencil_allowed(clear_value))))
987
return false;
988
989
return true;
990
}
991
992
static void
993
radv_fast_clear_depth(struct radv_cmd_buffer *cmd_buffer, const struct radv_image_view *iview,
994
const VkClearAttachment *clear_att, enum radv_cmd_flush_bits *pre_flush,
995
enum radv_cmd_flush_bits *post_flush)
996
{
997
VkClearDepthStencilValue clear_value = clear_att->clearValue.depthStencil;
998
VkImageAspectFlags aspects = clear_att->aspectMask;
999
uint32_t clear_word, flush_bits;
1000
1001
clear_word = radv_get_htile_fast_clear_value(cmd_buffer->device, iview->image, clear_value);
1002
1003
if (pre_flush) {
1004
enum radv_cmd_flush_bits bits =
1005
radv_src_access_flush(cmd_buffer, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
1006
iview->image) |
1007
radv_dst_access_flush(cmd_buffer, VK_ACCESS_SHADER_WRITE_BIT |
1008
VK_ACCESS_SHADER_READ_BIT, iview->image);
1009
cmd_buffer->state.flush_bits |= bits & ~*pre_flush;
1010
*pre_flush |= cmd_buffer->state.flush_bits;
1011
}
1012
1013
VkImageSubresourceRange range = {
1014
.aspectMask = aspects,
1015
.baseMipLevel = iview->base_mip,
1016
.levelCount = iview->level_count,
1017
.baseArrayLayer = iview->base_layer,
1018
.layerCount = iview->layer_count,
1019
};
1020
1021
flush_bits = radv_clear_htile(cmd_buffer, iview->image, &range, clear_word);
1022
1023
if (iview->image->planes[0].surface.has_stencil &&
1024
!(aspects == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))) {
1025
/* Synchronize after performing a depth-only or a stencil-only
1026
* fast clear because the driver uses an optimized path which
1027
* performs a read-modify-write operation, and the two separate
1028
* aspects might use the same HTILE memory.
1029
*/
1030
cmd_buffer->state.flush_bits |= flush_bits;
1031
}
1032
1033
radv_update_ds_clear_metadata(cmd_buffer, iview, clear_value, aspects);
1034
if (post_flush) {
1035
*post_flush |= flush_bits;
1036
}
1037
}
1038
1039
static nir_shader *
1040
build_clear_htile_mask_shader()
1041
{
1042
nir_builder b =
1043
nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "meta_clear_htile_mask");
1044
b.shader->info.workgroup_size[0] = 64;
1045
b.shader->info.workgroup_size[1] = 1;
1046
b.shader->info.workgroup_size[2] = 1;
1047
1048
nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
1049
nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
1050
nir_ssa_def *block_size =
1051
nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
1052
b.shader->info.workgroup_size[2], 0);
1053
1054
nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
1055
1056
nir_ssa_def *offset = nir_imul(&b, global_id, nir_imm_int(&b, 16));
1057
offset = nir_channel(&b, offset, 0);
1058
1059
nir_ssa_def *buf = radv_meta_load_descriptor(&b, 0, 0);
1060
1061
nir_ssa_def *constants = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8);
1062
1063
nir_ssa_def *load = nir_load_ssbo(&b, 4, 32, buf, offset, .align_mul = 16);
1064
1065
/* data = (data & ~htile_mask) | (htile_value & htile_mask) */
1066
nir_ssa_def *data = nir_iand(&b, load, nir_channel(&b, constants, 1));
1067
data = nir_ior(&b, data, nir_channel(&b, constants, 0));
1068
1069
nir_store_ssbo(&b, data, buf, offset, .write_mask = 0xf, .access = ACCESS_NON_READABLE,
1070
.align_mul = 16);
1071
1072
return b.shader;
1073
}
1074
1075
static VkResult
1076
init_meta_clear_htile_mask_state(struct radv_device *device)
1077
{
1078
struct radv_meta_state *state = &device->meta_state;
1079
VkResult result;
1080
nir_shader *cs = build_clear_htile_mask_shader();
1081
1082
VkDescriptorSetLayoutCreateInfo ds_layout_info = {
1083
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
1084
.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
1085
.bindingCount = 1,
1086
.pBindings = (VkDescriptorSetLayoutBinding[]){
1087
{.binding = 0,
1088
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
1089
.descriptorCount = 1,
1090
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
1091
.pImmutableSamplers = NULL},
1092
}};
1093
1094
result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device), &ds_layout_info,
1095
&state->alloc, &state->clear_htile_mask_ds_layout);
1096
if (result != VK_SUCCESS)
1097
goto fail;
1098
1099
VkPipelineLayoutCreateInfo p_layout_info = {
1100
.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
1101
.setLayoutCount = 1,
1102
.pSetLayouts = &state->clear_htile_mask_ds_layout,
1103
.pushConstantRangeCount = 1,
1104
.pPushConstantRanges =
1105
&(VkPushConstantRange){
1106
VK_SHADER_STAGE_COMPUTE_BIT,
1107
0,
1108
8,
1109
},
1110
};
1111
1112
result = radv_CreatePipelineLayout(radv_device_to_handle(device), &p_layout_info, &state->alloc,
1113
&state->clear_htile_mask_p_layout);
1114
if (result != VK_SUCCESS)
1115
goto fail;
1116
1117
VkPipelineShaderStageCreateInfo shader_stage = {
1118
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
1119
.stage = VK_SHADER_STAGE_COMPUTE_BIT,
1120
.module = vk_shader_module_handle_from_nir(cs),
1121
.pName = "main",
1122
.pSpecializationInfo = NULL,
1123
};
1124
1125
VkComputePipelineCreateInfo pipeline_info = {
1126
.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
1127
.stage = shader_stage,
1128
.flags = 0,
1129
.layout = state->clear_htile_mask_p_layout,
1130
};
1131
1132
result = radv_CreateComputePipelines(radv_device_to_handle(device),
1133
radv_pipeline_cache_to_handle(&state->cache), 1,
1134
&pipeline_info, NULL, &state->clear_htile_mask_pipeline);
1135
1136
ralloc_free(cs);
1137
return result;
1138
fail:
1139
ralloc_free(cs);
1140
return result;
1141
}
1142
1143
VkResult
1144
radv_device_init_meta_clear_state(struct radv_device *device, bool on_demand)
1145
{
1146
VkResult res;
1147
struct radv_meta_state *state = &device->meta_state;
1148
1149
VkPipelineLayoutCreateInfo pl_color_create_info = {
1150
.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
1151
.setLayoutCount = 0,
1152
.pushConstantRangeCount = 1,
1153
.pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_FRAGMENT_BIT, 0, 16},
1154
};
1155
1156
res = radv_CreatePipelineLayout(radv_device_to_handle(device), &pl_color_create_info,
1157
&device->meta_state.alloc,
1158
&device->meta_state.clear_color_p_layout);
1159
if (res != VK_SUCCESS)
1160
goto fail;
1161
1162
VkPipelineLayoutCreateInfo pl_depth_create_info = {
1163
.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
1164
.setLayoutCount = 0,
1165
.pushConstantRangeCount = 1,
1166
.pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_VERTEX_BIT, 0, 4},
1167
};
1168
1169
res = radv_CreatePipelineLayout(radv_device_to_handle(device), &pl_depth_create_info,
1170
&device->meta_state.alloc,
1171
&device->meta_state.clear_depth_p_layout);
1172
if (res != VK_SUCCESS)
1173
goto fail;
1174
1175
VkPipelineLayoutCreateInfo pl_depth_unrestricted_create_info = {
1176
.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
1177
.setLayoutCount = 0,
1178
.pushConstantRangeCount = 1,
1179
.pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_FRAGMENT_BIT, 0, 4},
1180
};
1181
1182
res = radv_CreatePipelineLayout(radv_device_to_handle(device),
1183
&pl_depth_unrestricted_create_info, &device->meta_state.alloc,
1184
&device->meta_state.clear_depth_unrestricted_p_layout);
1185
if (res != VK_SUCCESS)
1186
goto fail;
1187
1188
res = init_meta_clear_htile_mask_state(device);
1189
if (res != VK_SUCCESS)
1190
goto fail;
1191
1192
if (on_demand)
1193
return VK_SUCCESS;
1194
1195
for (uint32_t i = 0; i < ARRAY_SIZE(state->clear); ++i) {
1196
uint32_t samples = 1 << i;
1197
for (uint32_t j = 0; j < NUM_META_FS_KEYS; ++j) {
1198
VkFormat format = radv_fs_key_format_exemplars[j];
1199
unsigned fs_key = radv_format_meta_fs_key(device, format);
1200
assert(!state->clear[i].color_pipelines[fs_key]);
1201
1202
res =
1203
create_color_renderpass(device, format, samples, &state->clear[i].render_pass[fs_key]);
1204
if (res != VK_SUCCESS)
1205
goto fail;
1206
1207
res = create_color_pipeline(device, samples, 0, &state->clear[i].color_pipelines[fs_key],
1208
state->clear[i].render_pass[fs_key]);
1209
if (res != VK_SUCCESS)
1210
goto fail;
1211
}
1212
1213
res = create_depthstencil_renderpass(device, samples, &state->clear[i].depthstencil_rp);
1214
if (res != VK_SUCCESS)
1215
goto fail;
1216
1217
for (uint32_t j = 0; j < NUM_DEPTH_CLEAR_PIPELINES; j++) {
1218
res = create_depthstencil_pipeline(device, VK_IMAGE_ASPECT_DEPTH_BIT, samples, j, false,
1219
&state->clear[i].depth_only_pipeline[j],
1220
state->clear[i].depthstencil_rp);
1221
if (res != VK_SUCCESS)
1222
goto fail;
1223
1224
res = create_depthstencil_pipeline(device, VK_IMAGE_ASPECT_STENCIL_BIT, samples, j, false,
1225
&state->clear[i].stencil_only_pipeline[j],
1226
state->clear[i].depthstencil_rp);
1227
if (res != VK_SUCCESS)
1228
goto fail;
1229
1230
res = create_depthstencil_pipeline(
1231
device, VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT, samples, j, false,
1232
&state->clear[i].depthstencil_pipeline[j], state->clear[i].depthstencil_rp);
1233
if (res != VK_SUCCESS)
1234
goto fail;
1235
1236
res = create_depthstencil_pipeline(device, VK_IMAGE_ASPECT_DEPTH_BIT, samples, j, true,
1237
&state->clear[i].depth_only_unrestricted_pipeline[j],
1238
state->clear[i].depthstencil_rp);
1239
if (res != VK_SUCCESS)
1240
goto fail;
1241
1242
res = create_depthstencil_pipeline(device, VK_IMAGE_ASPECT_STENCIL_BIT, samples, j, true,
1243
&state->clear[i].stencil_only_unrestricted_pipeline[j],
1244
state->clear[i].depthstencil_rp);
1245
if (res != VK_SUCCESS)
1246
goto fail;
1247
1248
res = create_depthstencil_pipeline(
1249
device, VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT, samples, j, true,
1250
&state->clear[i].depthstencil_unrestricted_pipeline[j],
1251
state->clear[i].depthstencil_rp);
1252
if (res != VK_SUCCESS)
1253
goto fail;
1254
}
1255
}
1256
return VK_SUCCESS;
1257
1258
fail:
1259
radv_device_finish_meta_clear_state(device);
1260
return res;
1261
}
1262
1263
static uint32_t
1264
radv_get_cmask_fast_clear_value(const struct radv_image *image)
1265
{
1266
uint32_t value = 0; /* Default value when no DCC. */
1267
1268
/* The fast-clear value is different for images that have both DCC and
1269
* CMASK metadata.
1270
*/
1271
if (radv_image_has_dcc(image)) {
1272
/* DCC fast clear with MSAA should clear CMASK to 0xC. */
1273
return image->info.samples > 1 ? 0xcccccccc : 0xffffffff;
1274
}
1275
1276
return value;
1277
}
1278
1279
uint32_t
1280
radv_clear_cmask(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image,
1281
const VkImageSubresourceRange *range, uint32_t value)
1282
{
1283
uint64_t offset = image->offset + image->planes[0].surface.cmask_offset;
1284
uint64_t size;
1285
1286
if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
1287
/* TODO: clear layers. */
1288
size = image->planes[0].surface.cmask_size;
1289
} else {
1290
unsigned slice_size = image->planes[0].surface.cmask_slice_size;
1291
1292
offset += slice_size * range->baseArrayLayer;
1293
size = slice_size * radv_get_layerCount(image, range);
1294
}
1295
1296
return radv_fill_buffer(cmd_buffer, image, image->bo, offset, size, value);
1297
}
1298
1299
uint32_t
1300
radv_clear_fmask(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image,
1301
const VkImageSubresourceRange *range, uint32_t value)
1302
{
1303
uint64_t offset = image->offset + image->planes[0].surface.fmask_offset;
1304
unsigned slice_size = image->planes[0].surface.fmask_slice_size;
1305
uint64_t size;
1306
1307
/* MSAA images do not support mipmap levels. */
1308
assert(range->baseMipLevel == 0 && radv_get_levelCount(image, range) == 1);
1309
1310
offset += slice_size * range->baseArrayLayer;
1311
size = slice_size * radv_get_layerCount(image, range);
1312
1313
return radv_fill_buffer(cmd_buffer, image, image->bo, offset, size, value);
1314
}
1315
1316
uint32_t
1317
radv_clear_dcc(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image,
1318
const VkImageSubresourceRange *range, uint32_t value)
1319
{
1320
uint32_t level_count = radv_get_levelCount(image, range);
1321
uint32_t layer_count = radv_get_layerCount(image, range);
1322
uint32_t flush_bits = 0;
1323
1324
/* Mark the image as being compressed. */
1325
radv_update_dcc_metadata(cmd_buffer, image, range, true);
1326
1327
for (uint32_t l = 0; l < level_count; l++) {
1328
uint64_t offset = image->offset + image->planes[0].surface.meta_offset;
1329
uint32_t level = range->baseMipLevel + l;
1330
uint64_t size;
1331
1332
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
1333
/* DCC for mipmaps+layers is currently disabled. */
1334
offset += image->planes[0].surface.meta_slice_size * range->baseArrayLayer +
1335
image->planes[0].surface.u.gfx9.meta_levels[level].offset;
1336
size = image->planes[0].surface.u.gfx9.meta_levels[level].size * layer_count;
1337
} else if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
1338
/* Mipmap levels and layers aren't implemented. */
1339
assert(level == 0);
1340
size = image->planes[0].surface.meta_size;
1341
} else {
1342
const struct legacy_surf_dcc_level *dcc_level =
1343
&image->planes[0].surface.u.legacy.color.dcc_level[level];
1344
1345
/* If dcc_fast_clear_size is 0 (which might happens for
1346
* mipmaps) the fill buffer operation below is a no-op.
1347
* This can only happen during initialization as the
1348
* fast clear path fallbacks to slow clears if one
1349
* level can't be fast cleared.
1350
*/
1351
offset +=
1352
dcc_level->dcc_offset + dcc_level->dcc_slice_fast_clear_size * range->baseArrayLayer;
1353
size = dcc_level->dcc_slice_fast_clear_size * radv_get_layerCount(image, range);
1354
}
1355
1356
/* Do not clear this level if it can't be compressed. */
1357
if (!size)
1358
continue;
1359
1360
flush_bits |= radv_fill_buffer(cmd_buffer, image, image->bo, offset, size, value);
1361
}
1362
1363
return flush_bits;
1364
}
1365
1366
uint32_t
1367
radv_clear_htile(struct radv_cmd_buffer *cmd_buffer, const struct radv_image *image,
1368
const VkImageSubresourceRange *range, uint32_t value)
1369
{
1370
uint32_t level_count = radv_get_levelCount(image, range);
1371
uint32_t flush_bits = 0;
1372
uint32_t htile_mask;
1373
1374
htile_mask = radv_get_htile_mask(cmd_buffer->device, image, range->aspectMask);
1375
1376
if (level_count != image->info.levels) {
1377
assert(cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10);
1378
1379
/* Clear individuals levels separately. */
1380
for (uint32_t l = 0; l < level_count; l++) {
1381
uint32_t level = range->baseMipLevel + l;
1382
uint64_t offset = image->offset + image->planes[0].surface.meta_offset +
1383
image->planes[0].surface.u.gfx9.meta_levels[level].offset;
1384
uint32_t size = image->planes[0].surface.u.gfx9.meta_levels[level].size;
1385
1386
/* Do not clear this level if it can be compressed. */
1387
if (!size)
1388
continue;
1389
1390
if (htile_mask == UINT_MAX) {
1391
/* Clear the whole HTILE buffer. */
1392
flush_bits |= radv_fill_buffer(cmd_buffer, image, image->bo, offset, size, value);
1393
} else {
1394
/* Only clear depth or stencil bytes in the HTILE buffer. */
1395
flush_bits |=
1396
clear_htile_mask(cmd_buffer, image, image->bo, offset, size, value, htile_mask);
1397
}
1398
}
1399
} else {
1400
unsigned layer_count = radv_get_layerCount(image, range);
1401
uint64_t size = image->planes[0].surface.meta_slice_size * layer_count;
1402
uint64_t offset = image->offset + image->planes[0].surface.meta_offset +
1403
image->planes[0].surface.meta_slice_size * range->baseArrayLayer;
1404
1405
if (htile_mask == UINT_MAX) {
1406
/* Clear the whole HTILE buffer. */
1407
flush_bits = radv_fill_buffer(cmd_buffer, image, image->bo, offset, size, value);
1408
} else {
1409
/* Only clear depth or stencil bytes in the HTILE buffer. */
1410
flush_bits =
1411
clear_htile_mask(cmd_buffer, image, image->bo, offset, size, value, htile_mask);
1412
}
1413
}
1414
1415
return flush_bits;
1416
}
1417
1418
enum {
1419
RADV_DCC_CLEAR_REG = 0x20202020U,
1420
RADV_DCC_CLEAR_MAIN_1 = 0x80808080U,
1421
RADV_DCC_CLEAR_SECONDARY_1 = 0x40404040U
1422
};
1423
1424
static void
1425
vi_get_fast_clear_parameters(struct radv_device *device, VkFormat image_format,
1426
VkFormat view_format, const VkClearColorValue *clear_value,
1427
uint32_t *reset_value, bool *can_avoid_fast_clear_elim)
1428
{
1429
bool values[4] = {0};
1430
int extra_channel;
1431
bool main_value = false;
1432
bool extra_value = false;
1433
bool has_color = false;
1434
bool has_alpha = false;
1435
*can_avoid_fast_clear_elim = false;
1436
1437
*reset_value = RADV_DCC_CLEAR_REG;
1438
1439
const struct util_format_description *desc = vk_format_description(view_format);
1440
if (view_format == VK_FORMAT_B10G11R11_UFLOAT_PACK32 ||
1441
view_format == VK_FORMAT_R5G6B5_UNORM_PACK16 || view_format == VK_FORMAT_B5G6R5_UNORM_PACK16)
1442
extra_channel = -1;
1443
else if (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) {
1444
if (vi_alpha_is_on_msb(device, view_format))
1445
extra_channel = desc->nr_channels - 1;
1446
else
1447
extra_channel = 0;
1448
} else
1449
return;
1450
1451
for (int i = 0; i < 4; i++) {
1452
int index = desc->swizzle[i] - PIPE_SWIZZLE_X;
1453
if (desc->swizzle[i] < PIPE_SWIZZLE_X || desc->swizzle[i] > PIPE_SWIZZLE_W)
1454
continue;
1455
1456
if (desc->channel[i].pure_integer && desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
1457
/* Use the maximum value for clamping the clear color. */
1458
int max = u_bit_consecutive(0, desc->channel[i].size - 1);
1459
1460
values[i] = clear_value->int32[i] != 0;
1461
if (clear_value->int32[i] != 0 && MIN2(clear_value->int32[i], max) != max)
1462
return;
1463
} else if (desc->channel[i].pure_integer &&
1464
desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
1465
/* Use the maximum value for clamping the clear color. */
1466
unsigned max = u_bit_consecutive(0, desc->channel[i].size);
1467
1468
values[i] = clear_value->uint32[i] != 0U;
1469
if (clear_value->uint32[i] != 0U && MIN2(clear_value->uint32[i], max) != max)
1470
return;
1471
} else {
1472
values[i] = clear_value->float32[i] != 0.0F;
1473
if (clear_value->float32[i] != 0.0F && clear_value->float32[i] != 1.0F)
1474
return;
1475
}
1476
1477
if (index == extra_channel) {
1478
extra_value = values[i];
1479
has_alpha = true;
1480
} else {
1481
main_value = values[i];
1482
has_color = true;
1483
}
1484
}
1485
1486
/* If alpha isn't present, make it the same as color, and vice versa. */
1487
if (!has_alpha)
1488
extra_value = main_value;
1489
else if (!has_color)
1490
main_value = extra_value;
1491
1492
for (int i = 0; i < 4; ++i)
1493
if (values[i] != main_value && desc->swizzle[i] - PIPE_SWIZZLE_X != extra_channel &&
1494
desc->swizzle[i] >= PIPE_SWIZZLE_X && desc->swizzle[i] <= PIPE_SWIZZLE_W)
1495
return;
1496
1497
*can_avoid_fast_clear_elim = true;
1498
*reset_value = 0;
1499
if (main_value)
1500
*reset_value |= RADV_DCC_CLEAR_MAIN_1;
1501
1502
if (extra_value)
1503
*reset_value |= RADV_DCC_CLEAR_SECONDARY_1;
1504
return;
1505
}
1506
1507
static bool
1508
radv_can_fast_clear_color(struct radv_cmd_buffer *cmd_buffer, const struct radv_image_view *iview,
1509
VkImageLayout image_layout, bool in_render_loop,
1510
const VkClearRect *clear_rect, VkClearColorValue clear_value,
1511
uint32_t view_mask)
1512
{
1513
uint32_t clear_color[2];
1514
1515
if (!iview || !iview->support_fast_clear)
1516
return false;
1517
1518
if (!radv_layout_can_fast_clear(
1519
cmd_buffer->device, iview->image, iview->base_mip, image_layout, in_render_loop,
1520
radv_image_queue_family_mask(iview->image, cmd_buffer->queue_family_index,
1521
cmd_buffer->queue_family_index)))
1522
return false;
1523
1524
if (clear_rect->rect.offset.x || clear_rect->rect.offset.y ||
1525
clear_rect->rect.extent.width != iview->image->info.width ||
1526
clear_rect->rect.extent.height != iview->image->info.height)
1527
return false;
1528
1529
if (view_mask && (iview->image->info.array_size >= 32 ||
1530
(1u << iview->image->info.array_size) - 1u != view_mask))
1531
return false;
1532
if (!view_mask && clear_rect->baseArrayLayer != 0)
1533
return false;
1534
if (!view_mask && clear_rect->layerCount != iview->image->info.array_size)
1535
return false;
1536
1537
/* DCC */
1538
if (!radv_format_pack_clear_color(iview->vk_format, clear_color, &clear_value))
1539
return false;
1540
1541
if (!radv_image_has_clear_value(iview->image) && (clear_color[0] != 0 || clear_color[1] != 0))
1542
return false;
1543
1544
if (radv_dcc_enabled(iview->image, iview->base_mip)) {
1545
bool can_avoid_fast_clear_elim;
1546
uint32_t reset_value;
1547
1548
vi_get_fast_clear_parameters(cmd_buffer->device, iview->image->vk_format, iview->vk_format,
1549
&clear_value, &reset_value, &can_avoid_fast_clear_elim);
1550
1551
if (iview->image->info.samples > 1) {
1552
/* DCC fast clear with MSAA should clear CMASK. */
1553
/* FIXME: This doesn't work for now. There is a
1554
* hardware bug with fast clears and DCC for MSAA
1555
* textures. AMDVLK has a workaround but it doesn't
1556
* seem to work here. Note that we might emit useless
1557
* CB flushes but that shouldn't matter.
1558
*/
1559
if (!can_avoid_fast_clear_elim)
1560
return false;
1561
}
1562
1563
if (iview->image->info.levels > 1) {
1564
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
1565
uint32_t last_level = iview->base_mip + iview->level_count - 1;
1566
if (last_level >= iview->image->planes[0].surface.num_meta_levels) {
1567
/* Do not fast clear if one level can't be fast cleared. */
1568
return false;
1569
}
1570
} else {
1571
for (uint32_t l = 0; l < iview->level_count; l++) {
1572
uint32_t level = iview->base_mip + l;
1573
struct legacy_surf_dcc_level *dcc_level =
1574
&iview->image->planes[0].surface.u.legacy.color.dcc_level[level];
1575
1576
/* Do not fast clear if one level can't be
1577
* fast cleared.
1578
*/
1579
if (!dcc_level->dcc_fast_clear_size)
1580
return false;
1581
}
1582
}
1583
}
1584
}
1585
1586
return true;
1587
}
1588
1589
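/* Execute a color fast clear: flush outstanding color attachment writes,
 * clear CMASK and/or DCC to the computed values, update the FCE predicate
 * so a fast-clear eliminate pass runs later if required, and store the new
 * clear color in the image metadata.
 */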
static void
1590
radv_fast_clear_color(struct radv_cmd_buffer *cmd_buffer, const struct radv_image_view *iview,
1591
const VkClearAttachment *clear_att, uint32_t subpass_att,
1592
enum radv_cmd_flush_bits *pre_flush, enum radv_cmd_flush_bits *post_flush)
1593
{
1594
VkClearColorValue clear_value = clear_att->clearValue.color;
1595
uint32_t clear_color[2], flush_bits = 0;
1596
uint32_t cmask_clear_value;
1597
VkImageSubresourceRange range = {
1598
.aspectMask = iview->aspect_mask,
1599
.baseMipLevel = iview->base_mip,
1600
.levelCount = iview->level_count,
1601
.baseArrayLayer = iview->base_layer,
1602
.layerCount = iview->layer_count,
1603
};
1604
1605
if (pre_flush) {
1606
enum radv_cmd_flush_bits bits =
1607
radv_src_access_flush(cmd_buffer, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, iview->image) |
1608
radv_dst_access_flush(cmd_buffer, VK_ACCESS_SHADER_WRITE_BIT, iview->image);
1609
cmd_buffer->state.flush_bits |= bits & ~*pre_flush;
1610
*pre_flush |= cmd_buffer->state.flush_bits;
1611
}
1612
1613
/* DCC */
1614
radv_format_pack_clear_color(iview->vk_format, clear_color, &clear_value);
1615
1616
cmask_clear_value = radv_get_cmask_fast_clear_value(iview->image);
1617
1618
/* clear cmask buffer */
1619
bool need_decompress_pass = false;
1620
if (radv_dcc_enabled(iview->image, iview->base_mip)) {
1621
uint32_t reset_value;
1622
bool can_avoid_fast_clear_elim;
1623
1624
vi_get_fast_clear_parameters(cmd_buffer->device, iview->image->vk_format, iview->vk_format,
1625
&clear_value, &reset_value, &can_avoid_fast_clear_elim);
1626
1627
if (radv_image_has_cmask(iview->image)) {
1628
flush_bits = radv_clear_cmask(cmd_buffer, iview->image, &range, cmask_clear_value);
1629
}
1630
1631
if (!can_avoid_fast_clear_elim)
1632
need_decompress_pass = true;
1633
1634
flush_bits |= radv_clear_dcc(cmd_buffer, iview->image, &range, reset_value);
1635
} else {
1636
flush_bits = radv_clear_cmask(cmd_buffer, iview->image, &range, cmask_clear_value);
1637
1638
/* CMASK-only fast clears must always be eliminated later. */
1639
need_decompress_pass = true;
1640
}
1641
1642
if (post_flush) {
1643
*post_flush |= flush_bits;
1644
}
1645
1646
/* Update the FCE predicate to perform a fast-clear eliminate. */
1647
radv_update_fce_metadata(cmd_buffer, iview->image, &range, need_decompress_pass);
1648
1649
radv_update_color_clear_metadata(cmd_buffer, iview, subpass_att, clear_color);
1650
}
1651
1652
/**
1653
* The parameters mean the same as those in vkCmdClearAttachments.
1654
*/
1655
static void
1656
emit_clear(struct radv_cmd_buffer *cmd_buffer, const VkClearAttachment *clear_att,
1657
const VkClearRect *clear_rect, enum radv_cmd_flush_bits *pre_flush,
1658
enum radv_cmd_flush_bits *post_flush, uint32_t view_mask, bool ds_resolve_clear)
1659
{
1660
const struct radv_framebuffer *fb = cmd_buffer->state.framebuffer;
1661
const struct radv_subpass *subpass = cmd_buffer->state.subpass;
1662
VkImageAspectFlags aspects = clear_att->aspectMask;
1663
1664
if (aspects & VK_IMAGE_ASPECT_COLOR_BIT) {
1665
const uint32_t subpass_att = clear_att->colorAttachment;
1666
assert(subpass_att < subpass->color_count);
1667
const uint32_t pass_att = subpass->color_attachments[subpass_att].attachment;
1668
if (pass_att == VK_ATTACHMENT_UNUSED)
1669
return;
1670
1671
VkImageLayout image_layout = subpass->color_attachments[subpass_att].layout;
1672
bool in_render_loop = subpass->color_attachments[subpass_att].in_render_loop;
1673
const struct radv_image_view *iview =
1674
fb ? cmd_buffer->state.attachments[pass_att].iview : NULL;
1675
VkClearColorValue clear_value = clear_att->clearValue.color;
1676
1677
if (radv_can_fast_clear_color(cmd_buffer, iview, image_layout, in_render_loop, clear_rect,
1678
clear_value, view_mask)) {
1679
radv_fast_clear_color(cmd_buffer, iview, clear_att, subpass_att, pre_flush, post_flush);
1680
} else {
1681
emit_color_clear(cmd_buffer, clear_att, clear_rect, view_mask);
1682
}
1683
} else {
1684
struct radv_subpass_attachment *ds_att = subpass->depth_stencil_attachment;
1685
1686
if (ds_resolve_clear)
1687
ds_att = subpass->ds_resolve_attachment;
1688
1689
if (!ds_att || ds_att->attachment == VK_ATTACHMENT_UNUSED)
1690
return;
1691
1692
VkImageLayout image_layout = ds_att->layout;
1693
bool in_render_loop = ds_att->in_render_loop;
1694
const struct radv_image_view *iview =
1695
fb ? cmd_buffer->state.attachments[ds_att->attachment].iview : NULL;
1696
VkClearDepthStencilValue clear_value = clear_att->clearValue.depthStencil;
1697
1698
assert(aspects & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
1699
1700
if (radv_can_fast_clear_depth(cmd_buffer, iview, image_layout, in_render_loop, aspects,
1701
clear_rect, clear_value, view_mask)) {
1702
radv_fast_clear_depth(cmd_buffer, iview, clear_att, pre_flush, post_flush);
1703
} else {
1704
emit_depthstencil_clear(cmd_buffer, clear_att, clear_rect, ds_att, view_mask);
1705
}
1706
}
1707
}
1708
1709
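/* Whether attachment 'a' still has pending clear aspects in the current
 * subpass (and, with multiview, at least one view left to clear).
 */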
static inline bool
1710
radv_attachment_needs_clear(struct radv_cmd_state *cmd_state, uint32_t a)
1711
{
1712
uint32_t view_mask = cmd_state->subpass->view_mask;
1713
return (a != VK_ATTACHMENT_UNUSED && cmd_state->attachments[a].pending_clear_aspects &&
1714
(!view_mask || (view_mask & ~cmd_state->attachments[a].cleared_views)));
1715
}
1716
1717
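/* Whether any color, depth/stencil or depth/stencil resolve attachment of
 * the current subpass still needs to be cleared.
 */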
static bool
1718
radv_subpass_needs_clear(struct radv_cmd_buffer *cmd_buffer)
1719
{
1720
struct radv_cmd_state *cmd_state = &cmd_buffer->state;
1721
uint32_t a;
1722
1723
if (!cmd_state->subpass)
1724
return false;
1725
1726
for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
1727
a = cmd_state->subpass->color_attachments[i].attachment;
1728
if (radv_attachment_needs_clear(cmd_state, a))
1729
return true;
1730
}
1731
1732
if (cmd_state->subpass->depth_stencil_attachment) {
1733
a = cmd_state->subpass->depth_stencil_attachment->attachment;
1734
if (radv_attachment_needs_clear(cmd_state, a))
1735
return true;
1736
}
1737
1738
if (!cmd_state->subpass->ds_resolve_attachment)
1739
return false;
1740
1741
a = cmd_state->subpass->ds_resolve_attachment->attachment;
1742
return radv_attachment_needs_clear(cmd_state, a);
1743
}
1744
1745
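/* Clear a single subpass attachment over the full render area and mark the
 * pending clear as done (per-view when multiview is enabled).
 */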
static void
1746
radv_subpass_clear_attachment(struct radv_cmd_buffer *cmd_buffer,
1747
struct radv_attachment_state *attachment,
1748
const VkClearAttachment *clear_att,
1749
enum radv_cmd_flush_bits *pre_flush,
1750
enum radv_cmd_flush_bits *post_flush, bool ds_resolve_clear)
1751
{
1752
struct radv_cmd_state *cmd_state = &cmd_buffer->state;
1753
uint32_t view_mask = cmd_state->subpass->view_mask;
1754
1755
VkClearRect clear_rect = {
1756
.rect = cmd_state->render_area,
1757
.baseArrayLayer = 0,
1758
.layerCount = cmd_state->framebuffer->layers,
1759
};
1760
1761
radv_describe_begin_render_pass_clear(cmd_buffer, clear_att->aspectMask);
1762
1763
emit_clear(cmd_buffer, clear_att, &clear_rect, pre_flush, post_flush,
1764
view_mask & ~attachment->cleared_views, ds_resolve_clear);
1765
if (view_mask)
1766
attachment->cleared_views |= view_mask;
1767
else
1768
attachment->pending_clear_aspects = 0;
1769
1770
radv_describe_end_render_pass_clear(cmd_buffer);
1771
}
1772
1773
/**
1774
* Emit any pending attachment clears for the current subpass.
1775
*
1776
* @see radv_attachment_state::pending_clear_aspects
1777
*/
1778
void
1779
radv_cmd_buffer_clear_subpass(struct radv_cmd_buffer *cmd_buffer)
1780
{
1781
struct radv_cmd_state *cmd_state = &cmd_buffer->state;
1782
struct radv_meta_saved_state saved_state;
1783
enum radv_cmd_flush_bits pre_flush = 0;
1784
enum radv_cmd_flush_bits post_flush = 0;
1785
1786
if (!radv_subpass_needs_clear(cmd_buffer))
1787
return;
1788
1789
radv_meta_save(&saved_state, cmd_buffer,
1790
RADV_META_SAVE_GRAPHICS_PIPELINE | RADV_META_SAVE_CONSTANTS);
1791
1792
for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
1793
uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
1794
1795
if (!radv_attachment_needs_clear(cmd_state, a))
1796
continue;
1797
1798
assert(cmd_state->attachments[a].pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT);
1799
1800
VkClearAttachment clear_att = {
1801
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1802
.colorAttachment = i, /* Use attachment index relative to subpass */
1803
.clearValue = cmd_state->attachments[a].clear_value,
1804
};
1805
1806
radv_subpass_clear_attachment(cmd_buffer, &cmd_state->attachments[a], &clear_att, &pre_flush,
1807
&post_flush, false);
1808
}
1809
1810
if (cmd_state->subpass->depth_stencil_attachment) {
1811
uint32_t ds = cmd_state->subpass->depth_stencil_attachment->attachment;
1812
if (radv_attachment_needs_clear(cmd_state, ds)) {
1813
VkClearAttachment clear_att = {
1814
.aspectMask = cmd_state->attachments[ds].pending_clear_aspects,
1815
.clearValue = cmd_state->attachments[ds].clear_value,
1816
};
1817
1818
radv_subpass_clear_attachment(cmd_buffer, &cmd_state->attachments[ds], &clear_att,
1819
&pre_flush, &post_flush, false);
1820
}
1821
}
1822
1823
if (cmd_state->subpass->ds_resolve_attachment) {
1824
uint32_t ds_resolve = cmd_state->subpass->ds_resolve_attachment->attachment;
1825
if (radv_attachment_needs_clear(cmd_state, ds_resolve)) {
1826
VkClearAttachment clear_att = {
1827
.aspectMask = cmd_state->attachments[ds_resolve].pending_clear_aspects,
1828
.clearValue = cmd_state->attachments[ds_resolve].clear_value,
1829
};
1830
1831
radv_subpass_clear_attachment(cmd_buffer, &cmd_state->attachments[ds_resolve], &clear_att,
1832
&pre_flush, &post_flush, true);
1833
}
1834
}
1835
1836
radv_meta_restore(&saved_state, cmd_buffer);
1837
cmd_buffer->state.flush_bits |= post_flush;
1838
}
1839
1840
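/* Slow path for image clears: build a temporary image view, framebuffer and
 * render pass for one mip level / layer and clear it via emit_clear().
 */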
static void
1841
radv_clear_image_layer(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image,
1842
VkImageLayout image_layout, const VkImageSubresourceRange *range,
1843
VkFormat format, int level, int layer, const VkClearValue *clear_val)
1844
{
1845
VkDevice device_h = radv_device_to_handle(cmd_buffer->device);
1846
struct radv_image_view iview;
1847
uint32_t width = radv_minify(image->info.width, range->baseMipLevel + level);
1848
uint32_t height = radv_minify(image->info.height, range->baseMipLevel + level);
1849
1850
radv_image_view_init(&iview, cmd_buffer->device,
1851
&(VkImageViewCreateInfo){
1852
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
1853
.image = radv_image_to_handle(image),
1854
.viewType = radv_meta_get_view_type(image),
1855
.format = format,
1856
.subresourceRange = {.aspectMask = range->aspectMask,
1857
.baseMipLevel = range->baseMipLevel + level,
1858
.levelCount = 1,
1859
.baseArrayLayer = range->baseArrayLayer + layer,
1860
.layerCount = 1},
1861
},
1862
NULL);
1863
1864
VkFramebuffer fb;
1865
radv_CreateFramebuffer(
1866
device_h,
1867
&(VkFramebufferCreateInfo){.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
1868
.attachmentCount = 1,
1869
.pAttachments =
1870
(VkImageView[]){
1871
radv_image_view_to_handle(&iview),
1872
},
1873
.width = width,
1874
.height = height,
1875
.layers = 1},
1876
&cmd_buffer->pool->alloc, &fb);
1877
1878
VkAttachmentDescription2 att_desc = {
1879
.sType = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2,
1880
.format = iview.vk_format,
1881
.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
1882
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
1883
.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
1884
.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE,
1885
.initialLayout = image_layout,
1886
.finalLayout = image_layout,
1887
};
1888
1889
VkSubpassDescription2 subpass_desc = {
1890
.sType = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2,
1891
.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
1892
.inputAttachmentCount = 0,
1893
.colorAttachmentCount = 0,
1894
.pColorAttachments = NULL,
1895
.pResolveAttachments = NULL,
1896
.pDepthStencilAttachment = NULL,
1897
.preserveAttachmentCount = 0,
1898
.pPreserveAttachments = NULL,
1899
};
1900
1901
const VkAttachmentReference2 att_ref = {
1902
.sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2,
1903
.attachment = 0,
1904
.layout = image_layout,
1905
};
1906
1907
if (range->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
1908
subpass_desc.colorAttachmentCount = 1;
1909
subpass_desc.pColorAttachments = &att_ref;
1910
} else {
1911
subpass_desc.pDepthStencilAttachment = &att_ref;
1912
}
1913
1914
VkRenderPass pass;
1915
radv_CreateRenderPass2(
1916
device_h,
1917
&(VkRenderPassCreateInfo2){
1918
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2,
1919
.attachmentCount = 1,
1920
.pAttachments = &att_desc,
1921
.subpassCount = 1,
1922
.pSubpasses = &subpass_desc,
1923
.dependencyCount = 2,
1924
.pDependencies =
1925
(VkSubpassDependency2[]){{.sType = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2,
1926
.srcSubpass = VK_SUBPASS_EXTERNAL,
1927
.dstSubpass = 0,
1928
.srcStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
1929
.dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
1930
.srcAccessMask = 0,
1931
.dstAccessMask = 0,
1932
.dependencyFlags = 0},
1933
{.sType = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2,
1934
.srcSubpass = 0,
1935
.dstSubpass = VK_SUBPASS_EXTERNAL,
1936
.srcStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
1937
.dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
1938
.srcAccessMask = 0,
1939
.dstAccessMask = 0,
1940
.dependencyFlags = 0}}},
1941
&cmd_buffer->pool->alloc, &pass);
1942
1943
radv_cmd_buffer_begin_render_pass(cmd_buffer,
1944
&(VkRenderPassBeginInfo){
1945
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
1946
.renderArea =
1947
{
1948
.offset =
1949
{
1950
0,
1951
0,
1952
},
1953
.extent =
1954
{
1955
.width = width,
1956
.height = height,
1957
},
1958
},
1959
.renderPass = pass,
1960
.framebuffer = fb,
1961
.clearValueCount = 0,
1962
.pClearValues = NULL,
1963
},
1964
NULL);
1965
1966
radv_cmd_buffer_set_subpass(cmd_buffer, &cmd_buffer->state.pass->subpasses[0]);
1967
1968
VkClearAttachment clear_att = {
1969
.aspectMask = range->aspectMask,
1970
.colorAttachment = 0,
1971
.clearValue = *clear_val,
1972
};
1973
1974
VkClearRect clear_rect = {
1975
.rect =
1976
{
1977
.offset = {0, 0},
1978
.extent = {width, height},
1979
},
1980
.baseArrayLayer = range->baseArrayLayer,
1981
.layerCount = 1, /* FINISHME: clear multi-layer framebuffer */
1982
};
1983
1984
emit_clear(cmd_buffer, &clear_att, &clear_rect, NULL, NULL, 0, false);
1985
1986
radv_cmd_buffer_end_render_pass(cmd_buffer);
1987
radv_DestroyRenderPass(device_h, pass, &cmd_buffer->pool->alloc);
1988
radv_DestroyFramebuffer(device_h, fb, &cmd_buffer->pool->alloc);
1989
}
1990
1991
/**
1992
* Return TRUE if a fast color or depth clear has been performed.
1993
*/
1994
static bool
1995
radv_fast_clear_range(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image, VkFormat format,
1996
VkImageLayout image_layout, bool in_render_loop,
1997
const VkImageSubresourceRange *range, const VkClearValue *clear_val)
1998
{
1999
struct radv_image_view iview;
2000
2001
radv_image_view_init(&iview, cmd_buffer->device,
2002
&(VkImageViewCreateInfo){
2003
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
2004
.image = radv_image_to_handle(image),
2005
.viewType = radv_meta_get_view_type(image),
2006
.format = image->vk_format,
2007
.subresourceRange =
2008
{
2009
.aspectMask = range->aspectMask,
2010
.baseMipLevel = range->baseMipLevel,
2011
.levelCount = range->levelCount,
2012
.baseArrayLayer = range->baseArrayLayer,
2013
.layerCount = range->layerCount,
2014
},
2015
},
2016
NULL);
2017
2018
VkClearRect clear_rect = {
2019
.rect =
2020
{
2021
.offset = {0, 0},
2022
.extent =
2023
{
2024
radv_minify(image->info.width, range->baseMipLevel),
2025
radv_minify(image->info.height, range->baseMipLevel),
2026
},
2027
},
2028
.baseArrayLayer = range->baseArrayLayer,
2029
.layerCount = range->layerCount,
2030
};
2031
2032
VkClearAttachment clear_att = {
2033
.aspectMask = range->aspectMask,
2034
.colorAttachment = 0,
2035
.clearValue = *clear_val,
2036
};
2037
2038
if (vk_format_is_color(format)) {
2039
if (radv_can_fast_clear_color(cmd_buffer, &iview, image_layout, in_render_loop, &clear_rect,
2040
clear_att.clearValue.color, 0)) {
2041
radv_fast_clear_color(cmd_buffer, &iview, &clear_att, clear_att.colorAttachment, NULL,
2042
NULL);
2043
return true;
2044
}
2045
} else {
2046
if (radv_can_fast_clear_depth(cmd_buffer, &iview, image_layout, in_render_loop,
2047
range->aspectMask, &clear_rect,
2048
clear_att.clearValue.depthStencil, 0)) {
2049
radv_fast_clear_depth(cmd_buffer, &iview, &clear_att, NULL, NULL);
2050
return true;
2051
}
2052
}
2053
2054
return false;
2055
}
2056
2057
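/* Common implementation of vkCmdClearColorImage/vkCmdClearDepthStencilImage:
 * apply format workarounds (E5B9G9R9 and R4G4), try a fast clear for each
 * range, otherwise clear every level/layer with the compute or render-pass
 * path, and re-initialize DCC for the cleared ranges when compressed stores
 * had to be disabled.
 */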
static void
2058
radv_cmd_clear_image(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image,
2059
VkImageLayout image_layout, const VkClearValue *clear_value,
2060
uint32_t range_count, const VkImageSubresourceRange *ranges, bool cs)
2061
{
2062
VkFormat format = image->vk_format;
2063
VkClearValue internal_clear_value;
2064
2065
if (ranges->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT)
2066
internal_clear_value.color = clear_value->color;
2067
else
2068
internal_clear_value.depthStencil = clear_value->depthStencil;
2069
2070
bool disable_compression = false;
2071
2072
if (format == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32) {
2073
bool blendable;
2074
if (cs ? !radv_is_storage_image_format_supported(cmd_buffer->device->physical_device, format)
2075
: !radv_is_colorbuffer_format_supported(cmd_buffer->device->physical_device, format,
2076
&blendable)) {
2077
format = VK_FORMAT_R32_UINT;
2078
internal_clear_value.color.uint32[0] = float3_to_rgb9e5(clear_value->color.float32);
2079
2080
uint32_t queue_mask = radv_image_queue_family_mask(image, cmd_buffer->queue_family_index,
2081
cmd_buffer->queue_family_index);
2082
2083
for (uint32_t r = 0; r < range_count; r++) {
2084
const VkImageSubresourceRange *range = &ranges[r];
2085
2086
/* Don't use compressed image stores because they will use an incompatible format. */
2087
if (radv_layout_dcc_compressed(cmd_buffer->device, image, range->baseMipLevel,
2088
image_layout, false, queue_mask)) {
2089
disable_compression = cs;
2090
break;
2091
}
2092
}
2093
}
2094
}
2095
2096
if (format == VK_FORMAT_R4G4_UNORM_PACK8) {
2097
uint8_t r, g;
2098
format = VK_FORMAT_R8_UINT;
2099
r = float_to_ubyte(clear_value->color.float32[0]) >> 4;
2100
g = float_to_ubyte(clear_value->color.float32[1]) >> 4;
2101
internal_clear_value.color.uint32[0] = (r << 4) | (g & 0xf);
2102
}
2103
2104
for (uint32_t r = 0; r < range_count; r++) {
2105
const VkImageSubresourceRange *range = &ranges[r];
2106
2107
/* Try to perform a fast clear first, otherwise fall back to
2108
* the legacy path.
2109
*/
2110
if (!cs && radv_fast_clear_range(cmd_buffer, image, format, image_layout, false, range,
2111
&internal_clear_value)) {
2112
continue;
2113
}
2114
2115
for (uint32_t l = 0; l < radv_get_levelCount(image, range); ++l) {
2116
const uint32_t layer_count = image->type == VK_IMAGE_TYPE_3D
2117
? radv_minify(image->info.depth, range->baseMipLevel + l)
2118
: radv_get_layerCount(image, range);
2119
for (uint32_t s = 0; s < layer_count; ++s) {
2120
2121
if (cs) {
2122
struct radv_meta_blit2d_surf surf;
2123
surf.format = format;
2124
surf.image = image;
2125
surf.level = range->baseMipLevel + l;
2126
surf.layer = range->baseArrayLayer + s;
2127
surf.aspect_mask = range->aspectMask;
2128
surf.disable_compression = disable_compression;
2129
radv_meta_clear_image_cs(cmd_buffer, &surf, &internal_clear_value.color);
2130
} else {
2131
assert(!disable_compression);
2132
radv_clear_image_layer(cmd_buffer, image, image_layout, range, format, l, s,
2133
&internal_clear_value);
2134
}
2135
}
2136
}
2137
}
2138
2139
if (disable_compression) {
2140
enum radv_cmd_flush_bits flush_bits = 0;
2141
for (unsigned i = 0; i < range_count; i++) {
2142
if (radv_dcc_enabled(image, ranges[i].baseMipLevel))
2143
flush_bits |= radv_clear_dcc(cmd_buffer, image, &ranges[i], 0xffffffffu);
2144
}
2145
cmd_buffer->state.flush_bits |= flush_bits;
2146
}
2147
}
2148
2149
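/* vkCmdClearColorImage: use the compute path on compute queues or when the
 * image is not renderable, otherwise the graphics path.
 */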
void
2150
radv_CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image_h, VkImageLayout imageLayout,
2151
const VkClearColorValue *pColor, uint32_t rangeCount,
2152
const VkImageSubresourceRange *pRanges)
2153
{
2154
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2155
RADV_FROM_HANDLE(radv_image, image, image_h);
2156
struct radv_meta_saved_state saved_state;
2157
bool cs;
2158
2159
cs = cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE ||
2160
!radv_image_is_renderable(cmd_buffer->device, image);
2161
2162
if (cs) {
2163
radv_meta_save(
2164
&saved_state, cmd_buffer,
2165
RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_CONSTANTS | RADV_META_SAVE_DESCRIPTORS);
2166
} else {
2167
radv_meta_save(&saved_state, cmd_buffer,
2168
RADV_META_SAVE_GRAPHICS_PIPELINE | RADV_META_SAVE_CONSTANTS);
2169
}
2170
2171
radv_cmd_clear_image(cmd_buffer, image, imageLayout, (const VkClearValue *)pColor, rangeCount,
2172
pRanges, cs);
2173
2174
radv_meta_restore(&saved_state, cmd_buffer);
2175
}
2176
2177
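/* vkCmdClearDepthStencilImage: always uses the graphics path. */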
void
2178
radv_CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image_h,
2179
VkImageLayout imageLayout,
2180
const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
2181
const VkImageSubresourceRange *pRanges)
2182
{
2183
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2184
RADV_FROM_HANDLE(radv_image, image, image_h);
2185
struct radv_meta_saved_state saved_state;
2186
2187
radv_meta_save(&saved_state, cmd_buffer,
2188
RADV_META_SAVE_GRAPHICS_PIPELINE | RADV_META_SAVE_CONSTANTS);
2189
2190
radv_cmd_clear_image(cmd_buffer, image, imageLayout, (const VkClearValue *)pDepthStencil,
2191
rangeCount, pRanges, false);
2192
2193
radv_meta_restore(&saved_state, cmd_buffer);
2194
}
2195
2196
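/* vkCmdClearAttachments: clear the requested attachments of the current
 * subpass, one rect at a time.
 */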
void
2197
radv_CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
2198
const VkClearAttachment *pAttachments, uint32_t rectCount,
2199
const VkClearRect *pRects)
2200
{
2201
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2202
struct radv_meta_saved_state saved_state;
2203
enum radv_cmd_flush_bits pre_flush = 0;
2204
enum radv_cmd_flush_bits post_flush = 0;
2205
2206
if (!cmd_buffer->state.subpass)
2207
return;
2208
2209
radv_meta_save(&saved_state, cmd_buffer,
2210
RADV_META_SAVE_GRAPHICS_PIPELINE | RADV_META_SAVE_CONSTANTS);
2211
2212
/* FINISHME: We can do better than this dumb loop. It thrashes too much
2213
* state.
2214
*/
2215
for (uint32_t a = 0; a < attachmentCount; ++a) {
2216
for (uint32_t r = 0; r < rectCount; ++r) {
2217
emit_clear(cmd_buffer, &pAttachments[a], &pRects[r], &pre_flush, &post_flush,
2218
cmd_buffer->state.subpass->view_mask, false);
2219
}
2220
}
2221
2222
radv_meta_restore(&saved_state, cmd_buffer);
2223
cmd_buffer->state.flush_bits |= post_flush;
2224
}
2225
2226