GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/broadcom/vulkan/v3dvx_cmd_buffer.c
1
/*
2
* Copyright © 2021 Raspberry Pi
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
* IN THE SOFTWARE.
22
*/
23
24
#include "v3dv_private.h"
25
#include "broadcom/common/v3d_macros.h"
26
#include "broadcom/cle/v3dx_pack.h"
27
#include "broadcom/compiler/v3d_compiler.h"
28
29
#include "util/half_float.h"
30
#include "vulkan/util/vk_format.h"
31
#include "util/u_pack_color.h"
32
33
#include "vk_format_info.h"
34
35
void
36
v3dX(job_emit_binning_flush)(struct v3dv_job *job)
37
{
38
assert(job);
39
40
v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(FLUSH));
41
v3dv_return_if_oom(NULL, job);
42
43
cl_emit(&job->bcl, FLUSH, flush);
44
}
45
46
void
47
v3dX(job_emit_binning_prolog)(struct v3dv_job *job,
48
const struct v3dv_frame_tiling *tiling,
49
uint32_t layers)
50
{
51
/* This must go before the binning mode configuration. It is
52
* required for layered framebuffers to work.
53
*/
54
cl_emit(&job->bcl, NUMBER_OF_LAYERS, config) {
55
config.number_of_layers = layers;
56
}
57
58
cl_emit(&job->bcl, TILE_BINNING_MODE_CFG, config) {
59
config.width_in_pixels = tiling->width;
60
config.height_in_pixels = tiling->height;
61
config.number_of_render_targets = MAX2(tiling->render_target_count, 1);
62
config.multisample_mode_4x = tiling->msaa;
63
config.maximum_bpp_of_all_render_targets = tiling->internal_bpp;
64
}
65
66
/* There's definitely nothing in the VCD cache we want. */
67
cl_emit(&job->bcl, FLUSH_VCD_CACHE, bin);
68
69
/* "Binning mode lists must have a Start Tile Binning item (6) after
70
* any prefix state data before the binning list proper starts."
71
*/
72
cl_emit(&job->bcl, START_TILE_BINNING, bin);
73
}
74
75
void
76
v3dX(cmd_buffer_end_render_pass_secondary)(struct v3dv_cmd_buffer *cmd_buffer)
77
{
78
assert(cmd_buffer->state.job);
79
v3dv_cl_ensure_space_with_branch(&cmd_buffer->state.job->bcl,
80
cl_packet_length(RETURN_FROM_SUB_LIST));
81
v3dv_return_if_oom(cmd_buffer, NULL);
82
cl_emit(&cmd_buffer->state.job->bcl, RETURN_FROM_SUB_LIST, ret);
83
}
84
85
void
86
v3dX(job_emit_clip_window)(struct v3dv_job *job, const VkRect2D *rect)
87
{
88
assert(job);
89
90
v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(CLIP_WINDOW));
91
v3dv_return_if_oom(NULL, job);
92
93
cl_emit(&job->bcl, CLIP_WINDOW, clip) {
94
clip.clip_window_left_pixel_coordinate = rect->offset.x;
95
clip.clip_window_bottom_pixel_coordinate = rect->offset.y;
96
clip.clip_window_width_in_pixels = rect->extent.width;
97
clip.clip_window_height_in_pixels = rect->extent.height;
98
}
99
}
100
101
static void
102
cmd_buffer_render_pass_emit_load(struct v3dv_cmd_buffer *cmd_buffer,
103
struct v3dv_cl *cl,
104
struct v3dv_image_view *iview,
105
uint32_t layer,
106
uint32_t buffer)
107
{
108
const struct v3dv_image *image = iview->image;
109
const struct v3d_resource_slice *slice = &image->slices[iview->base_level];
110
uint32_t layer_offset = v3dv_layer_offset(image,
111
iview->base_level,
112
iview->first_layer + layer);
113
114
cl_emit(cl, LOAD_TILE_BUFFER_GENERAL, load) {
115
load.buffer_to_load = buffer;
116
load.address = v3dv_cl_address(image->mem->bo, layer_offset);
117
118
load.input_image_format = iview->format->rt_type;
119
load.r_b_swap = iview->swap_rb;
120
load.memory_format = slice->tiling;
121
122
if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
123
slice->tiling == V3D_TILING_UIF_XOR) {
124
load.height_in_ub_or_stride =
125
slice->padded_height_of_output_image_in_uif_blocks;
126
} else if (slice->tiling == V3D_TILING_RASTER) {
127
load.height_in_ub_or_stride = slice->stride;
128
}
129
130
if (image->samples > VK_SAMPLE_COUNT_1_BIT)
131
load.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
132
else
133
load.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
134
}
135
}
136
137
static bool
138
check_needs_load(const struct v3dv_cmd_buffer_state *state,
139
VkImageAspectFlags aspect,
140
uint32_t att_first_subpass_idx,
141
VkAttachmentLoadOp load_op)
142
{
143
/* We call this with image->aspects & aspect, so 0 means the aspect we are
144
* testing does not exist in the image.
145
*/
146
if (!aspect)
147
return false;
148
149
/* Attachment load operations only apply on the first subpass that uses the
150
* attachment; on any other subpass we always need to load.
151
*/
152
if (state->job->first_subpass > att_first_subpass_idx)
153
return true;
154
155
/* If the job is continuing a subpass started in another job, we always
156
* need to load.
157
*/
158
if (state->job->is_subpass_continue)
159
return true;
160
161
/* If the area is not aligned to tile boundaries, we always need to load */
162
if (!state->tile_aligned_render_area)
163
return true;
164
165
/* The attachment load operation must be LOAD */
166
return load_op == VK_ATTACHMENT_LOAD_OP_LOAD;
167
}
168
169
static inline uint32_t
170
v3dv_zs_buffer(bool depth, bool stencil)
171
{
172
if (depth && stencil)
173
return ZSTENCIL;
174
else if (depth)
175
return Z;
176
else if (stencil)
177
return STENCIL;
178
return NONE;
179
}
180
181
static void
182
cmd_buffer_render_pass_emit_loads(struct v3dv_cmd_buffer *cmd_buffer,
183
struct v3dv_cl *cl,
184
uint32_t layer)
185
{
186
const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
187
const struct v3dv_framebuffer *framebuffer = state->framebuffer;
188
const struct v3dv_render_pass *pass = state->pass;
189
const struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];
190
191
for (uint32_t i = 0; i < subpass->color_count; i++) {
192
uint32_t attachment_idx = subpass->color_attachments[i].attachment;
193
194
if (attachment_idx == VK_ATTACHMENT_UNUSED)
195
continue;
196
197
const struct v3dv_render_pass_attachment *attachment =
198
&state->pass->attachments[attachment_idx];
199
200
/* According to the Vulkan spec:
201
*
202
* "The load operation for each sample in an attachment happens before
203
* any recorded command which accesses the sample in the first subpass
204
* where the attachment is used."
205
*
206
* If the load operation is CLEAR, we must only clear once on the first
207
* subpass that uses the attachment (and in that case we don't LOAD).
208
* After that, we always want to load so we don't lose any rendering done
209
* by a previous subpass to the same attachment. We also want to load
210
* if the current job is continuing subpass work started by a previous
211
* job, for the same reason.
212
*
213
* If the render area is not aligned to tile boundaries then we have
214
* tiles which are partially covered by it. In this case, we need to
215
* load the tiles so we can preserve the pixels that are outside the
216
* render area for any such tiles.
217
*/
218
bool needs_load = check_needs_load(state,
219
VK_IMAGE_ASPECT_COLOR_BIT,
220
attachment->first_subpass,
221
attachment->desc.loadOp);
222
if (needs_load) {
223
struct v3dv_image_view *iview = framebuffer->attachments[attachment_idx];
224
cmd_buffer_render_pass_emit_load(cmd_buffer, cl, iview,
225
layer, RENDER_TARGET_0 + i);
226
}
227
}
228
229
uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
230
if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
231
const struct v3dv_render_pass_attachment *ds_attachment =
232
&state->pass->attachments[ds_attachment_idx];
233
234
const VkImageAspectFlags ds_aspects =
235
vk_format_aspects(ds_attachment->desc.format);
236
237
const bool needs_depth_load =
238
check_needs_load(state,
239
ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
240
ds_attachment->first_subpass,
241
ds_attachment->desc.loadOp);
242
243
const bool needs_stencil_load =
244
check_needs_load(state,
245
ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
246
ds_attachment->first_subpass,
247
ds_attachment->desc.stencilLoadOp);
248
249
if (needs_depth_load || needs_stencil_load) {
250
struct v3dv_image_view *iview =
251
framebuffer->attachments[ds_attachment_idx];
252
/* From the Vulkan spec:
253
*
254
* "When an image view of a depth/stencil image is used as a
255
* depth/stencil framebuffer attachment, the aspectMask is ignored
256
* and both depth and stencil image subresources are used."
257
*
258
* So we ignore the aspects from the subresource range of the image
259
* view for the depth/stencil attachment, but we still need to restrict
260
* them to the aspects compatible with the render pass and the image.
261
*/
262
const uint32_t zs_buffer =
263
v3dv_zs_buffer(needs_depth_load, needs_stencil_load);
264
cmd_buffer_render_pass_emit_load(cmd_buffer, cl,
265
iview, layer, zs_buffer);
266
}
267
}
268
269
cl_emit(cl, END_OF_LOADS, end);
270
}
271
272
static void
273
cmd_buffer_render_pass_emit_store(struct v3dv_cmd_buffer *cmd_buffer,
274
struct v3dv_cl *cl,
275
uint32_t attachment_idx,
276
uint32_t layer,
277
uint32_t buffer,
278
bool clear,
279
bool is_multisample_resolve)
280
{
281
const struct v3dv_image_view *iview =
282
cmd_buffer->state.framebuffer->attachments[attachment_idx];
283
const struct v3dv_image *image = iview->image;
284
const struct v3d_resource_slice *slice = &image->slices[iview->base_level];
285
uint32_t layer_offset = v3dv_layer_offset(image,
286
iview->base_level,
287
iview->first_layer + layer);
288
289
cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
290
store.buffer_to_store = buffer;
291
store.address = v3dv_cl_address(image->mem->bo, layer_offset);
292
store.clear_buffer_being_stored = clear;
293
294
store.output_image_format = iview->format->rt_type;
295
store.r_b_swap = iview->swap_rb;
296
store.memory_format = slice->tiling;
297
298
if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
299
slice->tiling == V3D_TILING_UIF_XOR) {
300
store.height_in_ub_or_stride =
301
slice->padded_height_of_output_image_in_uif_blocks;
302
} else if (slice->tiling == V3D_TILING_RASTER) {
303
store.height_in_ub_or_stride = slice->stride;
304
}
305
306
if (image->samples > VK_SAMPLE_COUNT_1_BIT)
307
store.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
308
else if (is_multisample_resolve)
309
store.decimate_mode = V3D_DECIMATE_MODE_4X;
310
else
311
store.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;
312
}
313
}
314
315
static bool
316
check_needs_clear(const struct v3dv_cmd_buffer_state *state,
317
VkImageAspectFlags aspect,
318
uint32_t att_first_subpass_idx,
319
VkAttachmentLoadOp load_op,
320
bool do_clear_with_draw)
321
{
322
/* We call this with image->aspects & aspect, so 0 means the aspect we are
323
* testing does not exist in the image.
324
*/
325
if (!aspect)
326
return false;
327
328
/* If the aspect needs to be cleared with a draw call then we won't emit
329
* the clear here.
330
*/
331
if (do_clear_with_draw)
332
return false;
333
334
/* If this is resuming a subpass started with another job, then attachment
335
* load operations don't apply.
336
*/
337
if (state->job->is_subpass_continue)
338
return false;
339
340
/* If the render area is not aligned to tile boundaries we can't use the
341
* TLB for a clear.
342
*/
343
if (!state->tile_aligned_render_area)
344
return false;
345
346
/* If this job is running in a subpass other than the first subpass in
347
* which this attachment is used then attachment load operations don't apply.
348
*/
349
if (state->job->first_subpass != att_first_subpass_idx)
350
return false;
351
352
/* The attachment load operation must be CLEAR */
353
return load_op == VK_ATTACHMENT_LOAD_OP_CLEAR;
354
}
355
356
static bool
357
check_needs_store(const struct v3dv_cmd_buffer_state *state,
358
VkImageAspectFlags aspect,
359
uint32_t att_last_subpass_idx,
360
VkAttachmentStoreOp store_op)
361
{
362
/* We call this with image->aspects & aspect, so 0 means the aspect we are
363
* testing does not exist in the image.
364
*/
365
if (!aspect)
366
return false;
367
368
/* Attachment store operations only apply on the last subpass where the
369
* attachment is used; in other subpasses we always need to store.
370
*/
371
if (state->subpass_idx < att_last_subpass_idx)
372
return true;
373
374
/* Attachment store operations only apply on the last job we emit on the
375
* last subpass where the attachment is used, otherwise we always need to
376
* store.
377
*/
378
if (!state->job->is_subpass_finish)
379
return true;
380
381
/* The attachment store operation must be STORE */
382
return store_op == VK_ATTACHMENT_STORE_OP_STORE;
383
}
384
385
static void
386
cmd_buffer_render_pass_emit_stores(struct v3dv_cmd_buffer *cmd_buffer,
387
struct v3dv_cl *cl,
388
uint32_t layer)
389
{
390
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
391
const struct v3dv_subpass *subpass =
392
&state->pass->subpasses[state->subpass_idx];
393
394
bool has_stores = false;
395
bool use_global_zs_clear = false;
396
bool use_global_rt_clear = false;
397
398
/* FIXME: separate stencil */
399
uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
400
if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
401
const struct v3dv_render_pass_attachment *ds_attachment =
402
&state->pass->attachments[ds_attachment_idx];
403
404
assert(state->job->first_subpass >= ds_attachment->first_subpass);
405
assert(state->subpass_idx >= ds_attachment->first_subpass);
406
assert(state->subpass_idx <= ds_attachment->last_subpass);
407
408
/* From the Vulkan spec, VkImageSubresourceRange:
409
*
410
* "When an image view of a depth/stencil image is used as a
411
* depth/stencil framebuffer attachment, the aspectMask is ignored
412
* and both depth and stencil image subresources are used."
413
*
414
* So we ignore the aspects from the subresource range of the image
415
* view for the depth/stencil attachment, but we still need to restrict
416
* them to the aspects compatible with the render pass and the image.
417
*/
418
const VkImageAspectFlags aspects =
419
vk_format_aspects(ds_attachment->desc.format);
420
421
/* Only clear once on the first subpass that uses the attachment */
422
bool needs_depth_clear =
423
check_needs_clear(state,
424
aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
425
ds_attachment->first_subpass,
426
ds_attachment->desc.loadOp,
427
subpass->do_depth_clear_with_draw);
428
429
bool needs_stencil_clear =
430
check_needs_clear(state,
431
aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
432
ds_attachment->first_subpass,
433
ds_attachment->desc.stencilLoadOp,
434
subpass->do_stencil_clear_with_draw);
435
436
/* Skip the last store if it is not required */
437
bool needs_depth_store =
438
check_needs_store(state,
439
aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
440
ds_attachment->last_subpass,
441
ds_attachment->desc.storeOp);
442
443
bool needs_stencil_store =
444
check_needs_store(state,
445
aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
446
ds_attachment->last_subpass,
447
ds_attachment->desc.stencilStoreOp);
448
449
/* GFXH-1689: The per-buffer store command's clear buffer bit is broken
450
* for depth/stencil.
451
*
452
* There used to be some confusion regarding the Clear Tile Buffers
453
* Z/S bit also being broken, but we confirmed with Broadcom that this
454
* is not the case, it was just that some other hardware bugs (that we
455
* need to work around, such as GFXH-1461) could cause this bit to behave
456
* incorrectly.
457
*
458
* There used to be another issue where the RTs bit in the Clear Tile
459
* Buffers packet also cleared Z/S, but Broadcom confirmed this is
460
* fixed since V3D 4.1.
461
*
462
* So if we have to emit a clear of depth or stencil we don't use
463
* the per-buffer store clear bit, even if we need to store the buffers;
464
* instead we always have to use the Clear Tile Buffers Z/S bit.
465
* If we have configured the job to do early Z/S clearing, then we
466
* don't want to emit any Clear Tile Buffers command at all here.
467
*
468
* Note that GFXH-1689 is not reproduced in the simulator, where
469
* using the clear buffer bit in depth/stencil stores works fine.
470
*/
471
use_global_zs_clear = !state->job->early_zs_clear &&
472
(needs_depth_clear || needs_stencil_clear);
473
if (needs_depth_store || needs_stencil_store) {
474
const uint32_t zs_buffer =
475
v3dv_zs_buffer(needs_depth_store, needs_stencil_store);
476
cmd_buffer_render_pass_emit_store(cmd_buffer, cl,
477
ds_attachment_idx, layer,
478
zs_buffer, false, false);
479
has_stores = true;
480
}
481
}
482
483
for (uint32_t i = 0; i < subpass->color_count; i++) {
484
uint32_t attachment_idx = subpass->color_attachments[i].attachment;
485
486
if (attachment_idx == VK_ATTACHMENT_UNUSED)
487
continue;
488
489
const struct v3dv_render_pass_attachment *attachment =
490
&state->pass->attachments[attachment_idx];
491
492
assert(state->job->first_subpass >= attachment->first_subpass);
493
assert(state->subpass_idx >= attachment->first_subpass);
494
assert(state->subpass_idx <= attachment->last_subpass);
495
496
/* Only clear once on the first subpass that uses the attachment */
497
bool needs_clear =
498
check_needs_clear(state,
499
VK_IMAGE_ASPECT_COLOR_BIT,
500
attachment->first_subpass,
501
attachment->desc.loadOp,
502
false);
503
504
/* Skip the last store if it is not required */
505
bool needs_store =
506
check_needs_store(state,
507
VK_IMAGE_ASPECT_COLOR_BIT,
508
attachment->last_subpass,
509
attachment->desc.storeOp);
510
511
/* If we need to resolve this attachment, emit that store first. Notice
512
* that we must not request a tile buffer clear here in that case, since
513
* that would clear the tile buffer before we get to emit the actual
514
* color attachment store below, since the clear happens after the
515
* store is completed.
516
*
517
* If the attachment doesn't support TLB resolves then we will have to
518
* fall back to doing the resolve in a shader separately after this
519
* job, so we will need to store the multisampled attachment even if that
520
* wasn't requested by the client.
521
*/
522
const bool needs_resolve =
523
subpass->resolve_attachments &&
524
subpass->resolve_attachments[i].attachment != VK_ATTACHMENT_UNUSED;
525
if (needs_resolve && attachment->use_tlb_resolve) {
526
const uint32_t resolve_attachment_idx =
527
subpass->resolve_attachments[i].attachment;
528
cmd_buffer_render_pass_emit_store(cmd_buffer, cl,
529
resolve_attachment_idx, layer,
530
RENDER_TARGET_0 + i,
531
false, true);
532
has_stores = true;
533
} else if (needs_resolve) {
534
needs_store = true;
535
}
536
537
/* Emit the color attachment store if needed */
538
if (needs_store) {
539
cmd_buffer_render_pass_emit_store(cmd_buffer, cl,
540
attachment_idx, layer,
541
RENDER_TARGET_0 + i,
542
needs_clear && !use_global_rt_clear,
543
false);
544
has_stores = true;
545
} else if (needs_clear) {
546
use_global_rt_clear = true;
547
}
548
}
549
550
/* We always need to emit at least one dummy store */
551
if (!has_stores) {
552
cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
553
store.buffer_to_store = NONE;
554
}
555
}
556
557
/* If we have any depth/stencil clears we can't use the per-buffer clear
558
* bit and instead we have to emit a single clear of all tile buffers.
559
*/
560
if (use_global_zs_clear || use_global_rt_clear) {
561
cl_emit(cl, CLEAR_TILE_BUFFERS, clear) {
562
clear.clear_z_stencil_buffer = use_global_zs_clear;
563
clear.clear_all_render_targets = use_global_rt_clear;
564
}
565
}
566
}
567
568
static void
569
cmd_buffer_render_pass_emit_per_tile_rcl(struct v3dv_cmd_buffer *cmd_buffer,
570
uint32_t layer)
571
{
572
struct v3dv_job *job = cmd_buffer->state.job;
573
assert(job);
574
575
/* Emit the generic list in our indirect state -- the rcl will just
576
* have pointers into it.
577
*/
578
struct v3dv_cl *cl = &job->indirect;
579
v3dv_cl_ensure_space(cl, 200, 1);
580
v3dv_return_if_oom(cmd_buffer, NULL);
581
582
struct v3dv_cl_reloc tile_list_start = v3dv_cl_get_address(cl);
583
584
cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
585
586
cmd_buffer_render_pass_emit_loads(cmd_buffer, cl, layer);
587
588
/* The binner starts out writing tiles assuming that the initial mode
589
* is triangles, so make sure that's the case.
590
*/
591
cl_emit(cl, PRIM_LIST_FORMAT, fmt) {
592
fmt.primitive_type = LIST_TRIANGLES;
593
}
594
595
/* The PTB assumes this value is 0, but the HW will not set it. */
596
cl_emit(cl, SET_INSTANCEID, set) {
597
set.instance_id = 0;
598
}
599
600
cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
601
602
cmd_buffer_render_pass_emit_stores(cmd_buffer, cl, layer);
603
604
cl_emit(cl, END_OF_TILE_MARKER, end);
605
606
cl_emit(cl, RETURN_FROM_SUB_LIST, ret);
607
608
cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
609
branch.start = tile_list_start;
610
branch.end = v3dv_cl_get_address(cl);
611
}
612
}
613
614
static void
615
cmd_buffer_emit_render_pass_layer_rcl(struct v3dv_cmd_buffer *cmd_buffer,
616
uint32_t layer)
617
{
618
const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
619
620
struct v3dv_job *job = cmd_buffer->state.job;
621
struct v3dv_cl *rcl = &job->rcl;
622
623
/* If doing multicore binning, we would need to initialize each
624
* core's tile list here.
625
*/
626
const struct v3dv_frame_tiling *tiling = &job->frame_tiling;
627
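/* Each layer's tile lists start 64 bytes per draw tile into the tile
* allocation BO, presumably matching the 64-byte initial tile list block
* size programmed via TILE_LIST_INITIAL_BLOCK_SIZE
* (TILE_ALLOCATION_BLOCK_SIZE_64B) later in this file.
*/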
const uint32_t tile_alloc_offset =
628
64 * layer * tiling->draw_tiles_x * tiling->draw_tiles_y;
629
cl_emit(rcl, MULTICORE_RENDERING_TILE_LIST_SET_BASE, list) {
630
list.address = v3dv_cl_address(job->tile_alloc, tile_alloc_offset);
631
}
632
633
cl_emit(rcl, MULTICORE_RENDERING_SUPERTILE_CFG, config) {
634
config.number_of_bin_tile_lists = 1;
635
config.total_frame_width_in_tiles = tiling->draw_tiles_x;
636
config.total_frame_height_in_tiles = tiling->draw_tiles_y;
637
638
config.supertile_width_in_tiles = tiling->supertile_width;
639
config.supertile_height_in_tiles = tiling->supertile_height;
640
641
config.total_frame_width_in_supertiles =
642
tiling->frame_width_in_supertiles;
643
config.total_frame_height_in_supertiles =
644
tiling->frame_height_in_supertiles;
645
}
646
647
/* Start by clearing the tile buffer. */
648
cl_emit(rcl, TILE_COORDINATES, coords) {
649
coords.tile_column_number = 0;
650
coords.tile_row_number = 0;
651
}
652
653
/* Emit an initial clear of the tile buffers. This is necessary
654
* for any buffers that should be cleared (since clearing
655
* normally happens at the *end* of the generic tile list), but
656
* it's also nice to clear everything so the first tile doesn't
657
* inherit any contents from some previous frame.
658
*
659
* Also, implement the GFXH-1742 workaround. There's a race in
660
* the HW between the RCL updating the TLB's internal type/size
661
* and the spawning of the QPU instances using the TLB's current
662
* internal type/size. To make sure the QPUs get the right
663
* state, we need 1 dummy store in between internal type/size
664
* changes on V3D 3.x, and 2 dummy stores on 4.x.
665
*/
666
for (int i = 0; i < 2; i++) {
667
if (i > 0)
668
cl_emit(rcl, TILE_COORDINATES, coords);
669
cl_emit(rcl, END_OF_LOADS, end);
670
cl_emit(rcl, STORE_TILE_BUFFER_GENERAL, store) {
671
store.buffer_to_store = NONE;
672
}
673
if (i == 0 && cmd_buffer->state.tile_aligned_render_area) {
674
cl_emit(rcl, CLEAR_TILE_BUFFERS, clear) {
675
clear.clear_z_stencil_buffer = !job->early_zs_clear;
676
clear.clear_all_render_targets = true;
677
}
678
}
679
cl_emit(rcl, END_OF_TILE_MARKER, end);
680
}
681
682
cl_emit(rcl, FLUSH_VCD_CACHE, flush);
683
684
cmd_buffer_render_pass_emit_per_tile_rcl(cmd_buffer, layer);
685
686
uint32_t supertile_w_in_pixels =
687
tiling->tile_width * tiling->supertile_width;
688
uint32_t supertile_h_in_pixels =
689
tiling->tile_height * tiling->supertile_height;
690
const uint32_t min_x_supertile =
691
state->render_area.offset.x / supertile_w_in_pixels;
692
const uint32_t min_y_supertile =
693
state->render_area.offset.y / supertile_h_in_pixels;
694
695
uint32_t max_render_x = state->render_area.offset.x;
696
if (state->render_area.extent.width > 0)
697
max_render_x += state->render_area.extent.width - 1;
698
uint32_t max_render_y = state->render_area.offset.y;
699
if (state->render_area.extent.height > 0)
700
max_render_y += state->render_area.extent.height - 1;
701
const uint32_t max_x_supertile = max_render_x / supertile_w_in_pixels;
702
const uint32_t max_y_supertile = max_render_y / supertile_h_in_pixels;
703
704
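/* Only emit coordinates for the supertiles that intersect the render
* area; supertiles outside of it are never processed by the RCL.
*/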
for (int y = min_y_supertile; y <= max_y_supertile; y++) {
705
for (int x = min_x_supertile; x <= max_x_supertile; x++) {
706
cl_emit(rcl, SUPERTILE_COORDINATES, coords) {
707
coords.column_number_in_supertiles = x;
708
coords.row_number_in_supertiles = y;
709
}
710
}
711
}
712
}
713
714
static void
715
set_rcl_early_z_config(struct v3dv_job *job,
716
bool *early_z_disable,
717
uint32_t *early_z_test_and_update_direction)
718
{
719
/* If this is true then we have not emitted any draw calls in this job
720
* and we don't get any benefits from early Z.
721
*/
722
if (!job->decided_global_ez_enable) {
723
assert(job->draw_count == 0);
724
*early_z_disable = true;
725
return;
726
}
727
728
switch (job->first_ez_state) {
729
case V3D_EZ_UNDECIDED:
730
case V3D_EZ_LT_LE:
731
*early_z_disable = false;
732
*early_z_test_and_update_direction = EARLY_Z_DIRECTION_LT_LE;
733
break;
734
case V3D_EZ_GT_GE:
735
*early_z_disable = false;
736
*early_z_test_and_update_direction = EARLY_Z_DIRECTION_GT_GE;
737
break;
738
case V3D_EZ_DISABLED:
739
*early_z_disable = true;
740
break;
741
}
742
}
743
744
void
745
v3dX(cmd_buffer_emit_render_pass_rcl)(struct v3dv_cmd_buffer *cmd_buffer)
746
{
747
struct v3dv_job *job = cmd_buffer->state.job;
748
assert(job);
749
750
const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
751
const struct v3dv_framebuffer *framebuffer = state->framebuffer;
752
753
/* We can't emit the RCL until we have a framebuffer, which we may not have
754
* if we are recording a secondary command buffer. In that case, we will
755
* have to wait until vkCmdExecuteCommands is called from a primary command
756
* buffer.
757
*/
758
if (!framebuffer) {
759
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
760
return;
761
}
762
763
const struct v3dv_frame_tiling *tiling = &job->frame_tiling;
764
765
const uint32_t fb_layers = framebuffer->layers;
766
v3dv_cl_ensure_space_with_branch(&job->rcl, 200 +
767
MAX2(fb_layers, 1) * 256 *
768
cl_packet_length(SUPERTILE_COORDINATES));
769
v3dv_return_if_oom(cmd_buffer, NULL);
770
771
assert(state->subpass_idx < state->pass->subpass_count);
772
const struct v3dv_render_pass *pass = state->pass;
773
const struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];
774
struct v3dv_cl *rcl = &job->rcl;
775
776
/* Common config must be the first TILE_RENDERING_MODE_CFG and
777
* Z_STENCIL_CLEAR_VALUES must be last. The ones in between are optional
778
* updates to the previous HW state.
779
*/
780
bool do_early_zs_clear = false;
781
const uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
782
cl_emit(rcl, TILE_RENDERING_MODE_CFG_COMMON, config) {
783
config.image_width_pixels = framebuffer->width;
784
config.image_height_pixels = framebuffer->height;
785
config.number_of_render_targets = MAX2(subpass->color_count, 1);
786
config.multisample_mode_4x = tiling->msaa;
787
config.maximum_bpp_of_all_render_targets = tiling->internal_bpp;
788
789
if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
790
const struct v3dv_image_view *iview =
791
framebuffer->attachments[ds_attachment_idx];
792
config.internal_depth_type = iview->internal_type;
793
794
set_rcl_early_z_config(job,
795
&config.early_z_disable,
796
&config.early_z_test_and_update_direction);
797
798
/* Early-Z/S clear can be enabled if the job is clearing and not
799
* storing (or loading) depth. If a stencil aspect is also present
800
* we have the same requirements for it; however, in this case we
801
* can accept stencil loadOp DONT_CARE as well, so instead of
802
* checking that stencil is cleared we check that it is not loaded.
803
*
804
* Early-Z/S clearing is independent of Early Z/S testing, so it is
805
* possible to enable one but not the other so long as their
806
* respective requirements are met.
807
*/
808
struct v3dv_render_pass_attachment *ds_attachment =
809
&pass->attachments[ds_attachment_idx];
810
811
const VkImageAspectFlags ds_aspects =
812
vk_format_aspects(ds_attachment->desc.format);
813
814
bool needs_depth_clear =
815
check_needs_clear(state,
816
ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
817
ds_attachment->first_subpass,
818
ds_attachment->desc.loadOp,
819
subpass->do_depth_clear_with_draw);
820
821
bool needs_depth_store =
822
check_needs_store(state,
823
ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
824
ds_attachment->last_subpass,
825
ds_attachment->desc.storeOp);
826
827
do_early_zs_clear = needs_depth_clear && !needs_depth_store;
828
if (do_early_zs_clear &&
829
vk_format_has_stencil(ds_attachment->desc.format)) {
830
bool needs_stencil_load =
831
check_needs_load(state,
832
ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
833
ds_attachment->first_subpass,
834
ds_attachment->desc.stencilLoadOp);
835
836
bool needs_stencil_store =
837
check_needs_store(state,
838
ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
839
ds_attachment->last_subpass,
840
ds_attachment->desc.stencilStoreOp);
841
842
do_early_zs_clear = !needs_stencil_load && !needs_stencil_store;
843
}
844
845
config.early_depth_stencil_clear = do_early_zs_clear;
846
} else {
847
config.early_z_disable = true;
848
}
849
}
850
851
/* If we enabled early Z/S clear, then we can't emit any "Clear Tile Buffers"
852
* commands with the Z/S bit set, so keep track of whether we enabled this
853
* in the job so we can skip these later.
854
*/
855
job->early_zs_clear = do_early_zs_clear;
856
857
for (uint32_t i = 0; i < subpass->color_count; i++) {
858
uint32_t attachment_idx = subpass->color_attachments[i].attachment;
859
if (attachment_idx == VK_ATTACHMENT_UNUSED)
860
continue;
861
862
struct v3dv_image_view *iview =
863
state->framebuffer->attachments[attachment_idx];
864
865
const struct v3dv_image *image = iview->image;
866
const struct v3d_resource_slice *slice = &image->slices[iview->base_level];
867
868
const uint32_t *clear_color =
869
&state->attachments[attachment_idx].clear_value.color[0];
870
871
uint32_t clear_pad = 0;
872
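/* When the slice's UIF padded height exceeds the implicit padding derived
* from the framebuffer height by 15 or more UIF blocks, we pass the padded
* height explicitly via clear_pad in the PART3 packet below (presumably
* because the HW can no longer infer it; assumption based on this
* computation, the exact limit is not documented here).
*/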
if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
873
slice->tiling == V3D_TILING_UIF_XOR) {
874
int uif_block_height = v3d_utile_height(image->cpp) * 2;
875
876
uint32_t implicit_padded_height =
877
align(framebuffer->height, uif_block_height) / uif_block_height;
878
879
if (slice->padded_height_of_output_image_in_uif_blocks -
880
implicit_padded_height >= 15) {
881
clear_pad = slice->padded_height_of_output_image_in_uif_blocks;
882
}
883
}
884
885
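/* The clear color can be up to 128 bits depending on the internal bpp, so
* it is split across up to three packets: PART1 carries the low 56 bits,
* PART2 the middle 56 bits for 64-bit and wider targets, and PART3 the
* high 16 bits for 128-bit targets (and the UIF padding when needed).
*/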
cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART1, clear) {
886
clear.clear_color_low_32_bits = clear_color[0];
887
clear.clear_color_next_24_bits = clear_color[1] & 0xffffff;
888
clear.render_target_number = i;
889
};
890
891
if (iview->internal_bpp >= V3D_INTERNAL_BPP_64) {
892
cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART2, clear) {
893
clear.clear_color_mid_low_32_bits =
894
((clear_color[1] >> 24) | (clear_color[2] << 8));
895
clear.clear_color_mid_high_24_bits =
896
((clear_color[2] >> 24) | ((clear_color[3] & 0xffff) << 8));
897
clear.render_target_number = i;
898
};
899
}
900
901
if (iview->internal_bpp >= V3D_INTERNAL_BPP_128 || clear_pad) {
902
cl_emit(rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART3, clear) {
903
clear.uif_padded_height_in_uif_blocks = clear_pad;
904
clear.clear_color_high_16_bits = clear_color[3] >> 16;
905
clear.render_target_number = i;
906
};
907
}
908
}
909
910
cl_emit(rcl, TILE_RENDERING_MODE_CFG_COLOR, rt) {
911
v3dX(cmd_buffer_render_pass_setup_render_target)
912
(cmd_buffer, 0, &rt.render_target_0_internal_bpp,
913
&rt.render_target_0_internal_type, &rt.render_target_0_clamp);
914
v3dX(cmd_buffer_render_pass_setup_render_target)
915
(cmd_buffer, 1, &rt.render_target_1_internal_bpp,
916
&rt.render_target_1_internal_type, &rt.render_target_1_clamp);
917
v3dX(cmd_buffer_render_pass_setup_render_target)
918
(cmd_buffer, 2, &rt.render_target_2_internal_bpp,
919
&rt.render_target_2_internal_type, &rt.render_target_2_clamp);
920
v3dX(cmd_buffer_render_pass_setup_render_target)
921
(cmd_buffer, 3, &rt.render_target_3_internal_bpp,
922
&rt.render_target_3_internal_type, &rt.render_target_3_clamp);
923
}
924
925
/* Ends rendering mode config. */
926
if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
927
cl_emit(rcl, TILE_RENDERING_MODE_CFG_ZS_CLEAR_VALUES, clear) {
928
clear.z_clear_value =
929
state->attachments[ds_attachment_idx].clear_value.z;
930
clear.stencil_clear_value =
931
state->attachments[ds_attachment_idx].clear_value.s;
932
};
933
} else {
934
cl_emit(rcl, TILE_RENDERING_MODE_CFG_ZS_CLEAR_VALUES, clear) {
935
clear.z_clear_value = 1.0f;
936
clear.stencil_clear_value = 0;
937
};
938
}
939
940
/* Always set initial block size before the first branch, which needs
941
* to match the value from binning mode config.
942
*/
943
cl_emit(rcl, TILE_LIST_INITIAL_BLOCK_SIZE, init) {
944
init.use_auto_chained_tile_lists = true;
945
init.size_of_first_block_in_chained_tile_lists =
946
TILE_ALLOCATION_BLOCK_SIZE_64B;
947
}
948
949
for (int layer = 0; layer < MAX2(1, fb_layers); layer++)
950
cmd_buffer_emit_render_pass_layer_rcl(cmd_buffer, layer);
951
952
cl_emit(rcl, END_OF_RENDERING, end);
953
}
954
955
void
956
v3dX(cmd_buffer_emit_viewport)(struct v3dv_cmd_buffer *cmd_buffer)
957
{
958
struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
959
/* FIXME: right now we only support one viewport. viewports[0] would work
960
* for now; this would need to change if we allow multiple viewports.
961
*/
962
float *vptranslate = dynamic->viewport.translate[0];
963
float *vpscale = dynamic->viewport.scale[0];
964
965
struct v3dv_job *job = cmd_buffer->state.job;
966
assert(job);
967
968
const uint32_t required_cl_size =
969
cl_packet_length(CLIPPER_XY_SCALING) +
970
cl_packet_length(CLIPPER_Z_SCALE_AND_OFFSET) +
971
cl_packet_length(CLIPPER_Z_MIN_MAX_CLIPPING_PLANES) +
972
cl_packet_length(VIEWPORT_OFFSET);
973
v3dv_cl_ensure_space_with_branch(&job->bcl, required_cl_size);
974
v3dv_return_if_oom(cmd_buffer, NULL);
975
976
cl_emit(&job->bcl, CLIPPER_XY_SCALING, clip) {
977
clip.viewport_half_width_in_1_256th_of_pixel = vpscale[0] * 256.0f;
978
clip.viewport_half_height_in_1_256th_of_pixel = vpscale[1] * 256.0f;
979
}
980
981
cl_emit(&job->bcl, CLIPPER_Z_SCALE_AND_OFFSET, clip) {
982
clip.viewport_z_offset_zc_to_zs = vptranslate[2];
983
clip.viewport_z_scale_zc_to_zs = vpscale[2];
984
}
985
cl_emit(&job->bcl, CLIPPER_Z_MIN_MAX_CLIPPING_PLANES, clip) {
986
/* Vulkan's Z NDC is [0..1], unlike OpenGL, which is [-1, 1] */
987
float z1 = vptranslate[2];
988
float z2 = vptranslate[2] + vpscale[2];
989
clip.minimum_zw = MIN2(z1, z2);
990
clip.maximum_zw = MAX2(z1, z2);
991
}
992
993
cl_emit(&job->bcl, VIEWPORT_OFFSET, vp) {
994
vp.viewport_centre_x_coordinate = vptranslate[0];
995
vp.viewport_centre_y_coordinate = vptranslate[1];
996
}
997
998
cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_VIEWPORT;
999
}
1000
1001
void
1002
v3dX(cmd_buffer_emit_stencil)(struct v3dv_cmd_buffer *cmd_buffer)
1003
{
1004
struct v3dv_job *job = cmd_buffer->state.job;
1005
assert(job);
1006
1007
struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
1008
struct v3dv_dynamic_state *dynamic_state = &cmd_buffer->state.dynamic;
1009
1010
const uint32_t dynamic_stencil_states = V3DV_DYNAMIC_STENCIL_COMPARE_MASK |
1011
V3DV_DYNAMIC_STENCIL_WRITE_MASK |
1012
V3DV_DYNAMIC_STENCIL_REFERENCE;
1013
1014
v3dv_cl_ensure_space_with_branch(&job->bcl,
1015
2 * cl_packet_length(STENCIL_CFG));
1016
v3dv_return_if_oom(cmd_buffer, NULL);
1017
1018
bool emitted_stencil = false;
1019
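/* i == 0 emits the front-facing stencil config, i == 1 the back-facing one. */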
for (uint32_t i = 0; i < 2; i++) {
1020
if (pipeline->emit_stencil_cfg[i]) {
1021
if (dynamic_state->mask & dynamic_stencil_states) {
1022
cl_emit_with_prepacked(&job->bcl, STENCIL_CFG,
1023
pipeline->stencil_cfg[i], config) {
1024
if (dynamic_state->mask & V3DV_DYNAMIC_STENCIL_COMPARE_MASK) {
1025
config.stencil_test_mask =
1026
i == 0 ? dynamic_state->stencil_compare_mask.front :
1027
dynamic_state->stencil_compare_mask.back;
1028
}
1029
if (dynamic_state->mask & V3DV_DYNAMIC_STENCIL_WRITE_MASK) {
1030
config.stencil_write_mask =
1031
i == 0 ? dynamic_state->stencil_write_mask.front :
1032
dynamic_state->stencil_write_mask.back;
1033
}
1034
if (dynamic_state->mask & V3DV_DYNAMIC_STENCIL_REFERENCE) {
1035
config.stencil_ref_value =
1036
i == 0 ? dynamic_state->stencil_reference.front :
1037
dynamic_state->stencil_reference.back;
1038
}
1039
}
1040
} else {
1041
cl_emit_prepacked(&job->bcl, &pipeline->stencil_cfg[i]);
1042
}
1043
1044
emitted_stencil = true;
1045
}
1046
}
1047
1048
if (emitted_stencil) {
1049
const uint32_t dynamic_stencil_dirty_flags =
1050
V3DV_CMD_DIRTY_STENCIL_COMPARE_MASK |
1051
V3DV_CMD_DIRTY_STENCIL_WRITE_MASK |
1052
V3DV_CMD_DIRTY_STENCIL_REFERENCE;
1053
cmd_buffer->state.dirty &= ~dynamic_stencil_dirty_flags;
1054
}
1055
}
1056
1057
void
1058
v3dX(cmd_buffer_emit_depth_bias)(struct v3dv_cmd_buffer *cmd_buffer)
1059
{
1060
struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
1061
assert(pipeline);
1062
1063
if (!pipeline->depth_bias.enabled)
1064
return;
1065
1066
struct v3dv_job *job = cmd_buffer->state.job;
1067
assert(job);
1068
1069
v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(DEPTH_OFFSET));
1070
v3dv_return_if_oom(cmd_buffer, NULL);
1071
1072
struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
1073
cl_emit(&job->bcl, DEPTH_OFFSET, bias) {
1074
bias.depth_offset_factor = dynamic->depth_bias.slope_factor;
1075
bias.depth_offset_units = dynamic->depth_bias.constant_factor;
1076
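/* For 16-bit depth formats we scale the constant factor by 256, presumably
* because the HW depth offset units assume a deeper (24-bit) depth buffer
* (assumption inferred from the 256.0f factor below).
*/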
if (pipeline->depth_bias.is_z16)
1077
bias.depth_offset_units *= 256.0f;
1078
bias.limit = dynamic->depth_bias.depth_bias_clamp;
1079
}
1080
1081
cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_DEPTH_BIAS;
1082
}
1083
1084
void
1085
v3dX(cmd_buffer_emit_line_width)(struct v3dv_cmd_buffer *cmd_buffer)
1086
{
1087
struct v3dv_job *job = cmd_buffer->state.job;
1088
assert(job);
1089
1090
v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(LINE_WIDTH));
1091
v3dv_return_if_oom(cmd_buffer, NULL);
1092
1093
cl_emit(&job->bcl, LINE_WIDTH, line) {
1094
line.line_width = cmd_buffer->state.dynamic.line_width;
1095
}
1096
1097
cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_LINE_WIDTH;
1098
}
1099
1100
void
1101
v3dX(cmd_buffer_emit_sample_state)(struct v3dv_cmd_buffer *cmd_buffer)
1102
{
1103
struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
1104
assert(pipeline);
1105
1106
struct v3dv_job *job = cmd_buffer->state.job;
1107
assert(job);
1108
1109
v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(SAMPLE_STATE));
1110
v3dv_return_if_oom(cmd_buffer, NULL);
1111
1112
cl_emit(&job->bcl, SAMPLE_STATE, state) {
1113
state.coverage = 1.0f;
1114
state.mask = pipeline->sample_mask;
1115
}
1116
}
1117
1118
void
1119
v3dX(cmd_buffer_emit_blend)(struct v3dv_cmd_buffer *cmd_buffer)
1120
{
1121
struct v3dv_job *job = cmd_buffer->state.job;
1122
assert(job);
1123
1124
struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
1125
assert(pipeline);
1126
1127
const uint32_t blend_packets_size =
1128
cl_packet_length(BLEND_ENABLES) +
1129
cl_packet_length(BLEND_CONSTANT_COLOR) +
1130
cl_packet_length(BLEND_CFG) * V3D_MAX_DRAW_BUFFERS +
1131
cl_packet_length(COLOR_WRITE_MASKS);
1132
1133
v3dv_cl_ensure_space_with_branch(&job->bcl, blend_packets_size);
1134
v3dv_return_if_oom(cmd_buffer, NULL);
1135
1136
if (cmd_buffer->state.dirty & V3DV_CMD_DIRTY_PIPELINE) {
1137
if (pipeline->blend.enables) {
1138
cl_emit(&job->bcl, BLEND_ENABLES, enables) {
1139
enables.mask = pipeline->blend.enables;
1140
}
1141
}
1142
1143
for (uint32_t i = 0; i < V3D_MAX_DRAW_BUFFERS; i++) {
1144
if (pipeline->blend.enables & (1 << i))
1145
cl_emit_prepacked(&job->bcl, &pipeline->blend.cfg[i]);
1146
}
1147
1148
cl_emit(&job->bcl, COLOR_WRITE_MASKS, mask) {
1149
mask.mask = pipeline->blend.color_write_masks;
1150
}
1151
}
1152
1153
if (pipeline->blend.needs_color_constants &&
1154
cmd_buffer->state.dirty & V3DV_CMD_DIRTY_BLEND_CONSTANTS) {
1155
struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
1156
cl_emit(&job->bcl, BLEND_CONSTANT_COLOR, color) {
1157
color.red_f16 = _mesa_float_to_half(dynamic->blend_constants[0]);
1158
color.green_f16 = _mesa_float_to_half(dynamic->blend_constants[1]);
1159
color.blue_f16 = _mesa_float_to_half(dynamic->blend_constants[2]);
1160
color.alpha_f16 = _mesa_float_to_half(dynamic->blend_constants[3]);
1161
}
1162
cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_BLEND_CONSTANTS;
1163
}
1164
}
1165
1166
static void
1167
emit_flat_shade_flags(struct v3dv_job *job,
1168
int varying_offset,
1169
uint32_t varyings,
1170
enum V3DX(Varying_Flags_Action) lower,
1171
enum V3DX(Varying_Flags_Action) higher)
1172
{
1173
v3dv_cl_ensure_space_with_branch(&job->bcl,
1174
cl_packet_length(FLAT_SHADE_FLAGS));
1175
v3dv_return_if_oom(NULL, job);
1176
1177
cl_emit(&job->bcl, FLAT_SHADE_FLAGS, flags) {
1178
flags.varying_offset_v0 = varying_offset;
1179
flags.flat_shade_flags_for_varyings_v024 = varyings;
1180
flags.action_for_flat_shade_flags_of_lower_numbered_varyings = lower;
1181
flags.action_for_flat_shade_flags_of_higher_numbered_varyings = higher;
1182
}
1183
}
1184
1185
static void
1186
emit_noperspective_flags(struct v3dv_job *job,
1187
int varying_offset,
1188
uint32_t varyings,
1189
enum V3DX(Varying_Flags_Action) lower,
1190
enum V3DX(Varying_Flags_Action) higher)
1191
{
1192
v3dv_cl_ensure_space_with_branch(&job->bcl,
1193
cl_packet_length(NON_PERSPECTIVE_FLAGS));
1194
v3dv_return_if_oom(NULL, job);
1195
1196
cl_emit(&job->bcl, NON_PERSPECTIVE_FLAGS, flags) {
1197
flags.varying_offset_v0 = varying_offset;
1198
flags.non_perspective_flags_for_varyings_v024 = varyings;
1199
flags.action_for_non_perspective_flags_of_lower_numbered_varyings = lower;
1200
flags.action_for_non_perspective_flags_of_higher_numbered_varyings = higher;
1201
}
1202
}
1203
1204
static void
1205
emit_centroid_flags(struct v3dv_job *job,
1206
int varying_offset,
1207
uint32_t varyings,
1208
enum V3DX(Varying_Flags_Action) lower,
1209
enum V3DX(Varying_Flags_Action) higher)
1210
{
1211
v3dv_cl_ensure_space_with_branch(&job->bcl,
1212
cl_packet_length(CENTROID_FLAGS));
1213
v3dv_return_if_oom(NULL, job);
1214
1215
cl_emit(&job->bcl, CENTROID_FLAGS, flags) {
1216
flags.varying_offset_v0 = varying_offset;
1217
flags.centroid_flags_for_varyings_v024 = varyings;
1218
flags.action_for_centroid_flags_of_lower_numbered_varyings = lower;
1219
flags.action_for_centroid_flags_of_higher_numbered_varyings = higher;
1220
}
1221
}
1222
1223
static bool
1224
emit_varying_flags(struct v3dv_job *job,
1225
uint32_t num_flags,
1226
const uint32_t *flags,
1227
void (*flag_emit_callback)(struct v3dv_job *job,
1228
int varying_offset,
1229
uint32_t flags,
1230
enum V3DX(Varying_Flags_Action) lower,
1231
enum V3DX(Varying_Flags_Action) higher))
1232
{
1233
bool emitted_any = false;
1234
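/* The first flags packet we emit zeroes the flag words of all other varying
* groups; subsequent packets leave previously emitted words unchanged and
* only update their own group.
*/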
for (int i = 0; i < num_flags; i++) {
1235
if (!flags[i])
1236
continue;
1237
1238
if (emitted_any) {
1239
flag_emit_callback(job, i, flags[i],
1240
V3D_VARYING_FLAGS_ACTION_UNCHANGED,
1241
V3D_VARYING_FLAGS_ACTION_UNCHANGED);
1242
} else if (i == 0) {
1243
flag_emit_callback(job, i, flags[i],
1244
V3D_VARYING_FLAGS_ACTION_UNCHANGED,
1245
V3D_VARYING_FLAGS_ACTION_ZEROED);
1246
} else {
1247
flag_emit_callback(job, i, flags[i],
1248
V3D_VARYING_FLAGS_ACTION_ZEROED,
1249
V3D_VARYING_FLAGS_ACTION_ZEROED);
1250
}
1251
1252
emitted_any = true;
1253
}
1254
1255
return emitted_any;
1256
}
1257
1258
void
1259
v3dX(cmd_buffer_emit_varyings_state)(struct v3dv_cmd_buffer *cmd_buffer)
1260
{
1261
struct v3dv_job *job = cmd_buffer->state.job;
1262
struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
1263
1264
struct v3d_fs_prog_data *prog_data_fs =
1265
pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT]->prog_data.fs;
1266
1267
const uint32_t num_flags =
1268
ARRAY_SIZE(prog_data_fs->flat_shade_flags);
1269
const uint32_t *flat_shade_flags = prog_data_fs->flat_shade_flags;
1270
const uint32_t *noperspective_flags = prog_data_fs->noperspective_flags;
1271
const uint32_t *centroid_flags = prog_data_fs->centroid_flags;
1272
1273
if (!emit_varying_flags(job, num_flags, flat_shade_flags,
1274
emit_flat_shade_flags)) {
1275
v3dv_cl_ensure_space_with_branch(
1276
&job->bcl, cl_packet_length(ZERO_ALL_FLAT_SHADE_FLAGS));
1277
v3dv_return_if_oom(cmd_buffer, NULL);
1278
1279
cl_emit(&job->bcl, ZERO_ALL_FLAT_SHADE_FLAGS, flags);
1280
}
1281
1282
if (!emit_varying_flags(job, num_flags, noperspective_flags,
1283
emit_noperspective_flags)) {
1284
v3dv_cl_ensure_space_with_branch(
1285
&job->bcl, cl_packet_length(ZERO_ALL_NON_PERSPECTIVE_FLAGS));
1286
v3dv_return_if_oom(cmd_buffer, NULL);
1287
1288
cl_emit(&job->bcl, ZERO_ALL_NON_PERSPECTIVE_FLAGS, flags);
1289
}
1290
1291
if (!emit_varying_flags(job, num_flags, centroid_flags,
1292
emit_centroid_flags)) {
1293
v3dv_cl_ensure_space_with_branch(
1294
&job->bcl, cl_packet_length(ZERO_ALL_CENTROID_FLAGS));
1295
v3dv_return_if_oom(cmd_buffer, NULL);
1296
1297
cl_emit(&job->bcl, ZERO_ALL_CENTROID_FLAGS, flags);
1298
}
1299
}
1300
1301
static void
1302
job_update_ez_state(struct v3dv_job *job,
1303
struct v3dv_pipeline *pipeline,
1304
struct v3dv_cmd_buffer *cmd_buffer)
1305
{
1306
/* If first_ez_state is V3D_EZ_DISABLED it means that we have already
1307
* determined that we should disable EZ completely for all draw calls in
1308
* this job. This will cause us to disable EZ for the entire job in the
1309
* Tile Rendering Mode RCL packet and when we do that we need to make sure
1310
* we never emit a draw call in the job with EZ enabled in the CFG_BITS
1311
* packet, so ez_state must also be V3D_EZ_DISABLED.
1312
*/
1313
if (job->first_ez_state == V3D_EZ_DISABLED) {
1314
assert(job->ez_state == V3D_EZ_DISABLED);
1315
return;
1316
}
1317
1318
/* This is part of the pre draw call handling, so we should be inside a
1319
* render pass.
1320
*/
1321
assert(cmd_buffer->state.pass);
1322
1323
/* If this is the first time we update EZ state for this job we first check
1324
* if there is anything that requires disabling it completely for the entire
1325
* job (based on state that is not related to the current draw call and
1326
* pipeline state).
1327
*/
1328
if (!job->decided_global_ez_enable) {
1329
job->decided_global_ez_enable = true;
1330
1331
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
1332
assert(state->subpass_idx < state->pass->subpass_count);
1333
struct v3dv_subpass *subpass = &state->pass->subpasses[state->subpass_idx];
1334
if (subpass->ds_attachment.attachment == VK_ATTACHMENT_UNUSED) {
1335
job->first_ez_state = V3D_EZ_DISABLED;
1336
job->ez_state = V3D_EZ_DISABLED;
1337
return;
1338
}
1339
1340
/* GFXH-1918: the early-z buffer may load incorrect depth values
1341
* if the frame has odd width or height.
1342
*
1343
* So we need to disable EZ in this case.
1344
*/
1345
const struct v3dv_render_pass_attachment *ds_attachment =
1346
&state->pass->attachments[subpass->ds_attachment.attachment];
1347
1348
const VkImageAspectFlags ds_aspects =
1349
vk_format_aspects(ds_attachment->desc.format);
1350
1351
bool needs_depth_load =
1352
check_needs_load(state,
1353
ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
1354
ds_attachment->first_subpass,
1355
ds_attachment->desc.loadOp);
1356
1357
if (needs_depth_load) {
1358
struct v3dv_framebuffer *fb = state->framebuffer;
1359
1360
if (!fb) {
1361
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
1362
perf_debug("Loading depth aspect in a secondary command buffer "
1363
"without framebuffer info disables early-z tests.\n");
1364
job->first_ez_state = V3D_EZ_DISABLED;
1365
job->ez_state = V3D_EZ_DISABLED;
1366
return;
1367
}
1368
1369
if (((fb->width % 2) != 0 || (fb->height % 2) != 0)) {
1370
perf_debug("Loading depth aspect for framebuffer with odd width "
1371
"or height disables early-Z tests.\n");
1372
job->first_ez_state = V3D_EZ_DISABLED;
1373
job->ez_state = V3D_EZ_DISABLED;
1374
return;
1375
}
1376
}
1377
}
1378
1379
/* Otherwise, we can decide to selectively enable or disable EZ for draw
1380
* calls using the CFG_BITS packet based on the bound pipeline state.
1381
*/
1382
1383
/* If the FS writes Z, then it may update against the chosen EZ direction */
1384
struct v3dv_shader_variant *fs_variant =
1385
pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
1386
if (fs_variant->prog_data.fs->writes_z) {
1387
job->ez_state = V3D_EZ_DISABLED;
1388
return;
1389
}
1390
1391
switch (pipeline->ez_state) {
1392
case V3D_EZ_UNDECIDED:
1393
/* If the pipeline didn't pick a direction but didn't disable, then go
1394
* along with the current EZ state. This allows EZ optimization for Z
1395
* func == EQUAL or NEVER.
1396
*/
1397
break;
1398
1399
case V3D_EZ_LT_LE:
1400
case V3D_EZ_GT_GE:
1401
/* If the pipeline picked a direction, then it needs to match the current
1402
* direction if we've decided on one.
1403
*/
1404
if (job->ez_state == V3D_EZ_UNDECIDED)
1405
job->ez_state = pipeline->ez_state;
1406
else if (job->ez_state != pipeline->ez_state)
1407
job->ez_state = V3D_EZ_DISABLED;
1408
break;
1409
1410
case V3D_EZ_DISABLED:
1411
/* If the pipeline disables EZ because of a bad Z func or stencil
1412
* operation, then we can't do any more EZ in this frame.
1413
*/
1414
job->ez_state = V3D_EZ_DISABLED;
1415
break;
1416
}
1417
1418
if (job->first_ez_state == V3D_EZ_UNDECIDED &&
1419
job->ez_state != V3D_EZ_DISABLED) {
1420
job->first_ez_state = job->ez_state;
1421
}
1422
}
1423
1424
void
1425
v3dX(cmd_buffer_emit_configuration_bits)(struct v3dv_cmd_buffer *cmd_buffer)
1426
{
1427
struct v3dv_job *job = cmd_buffer->state.job;
1428
assert(job);
1429
1430
struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
1431
assert(pipeline);
1432
1433
job_update_ez_state(job, pipeline, cmd_buffer);
1434
1435
v3dv_cl_ensure_space_with_branch(&job->bcl, cl_packet_length(CFG_BITS));
1436
v3dv_return_if_oom(cmd_buffer, NULL);
1437
1438
cl_emit_with_prepacked(&job->bcl, CFG_BITS, pipeline->cfg_bits, config) {
1439
config.early_z_enable = job->ez_state != V3D_EZ_DISABLED;
1440
config.early_z_updates_enable = config.early_z_enable &&
1441
pipeline->z_updates_enable;
1442
}
1443
}
1444
1445
void
1446
v3dX(cmd_buffer_emit_occlusion_query)(struct v3dv_cmd_buffer *cmd_buffer)
1447
{
1448
struct v3dv_job *job = cmd_buffer->state.job;
1449
assert(job);
1450
1451
v3dv_cl_ensure_space_with_branch(&job->bcl,
1452
cl_packet_length(OCCLUSION_QUERY_COUNTER));
1453
v3dv_return_if_oom(cmd_buffer, NULL);
1454
1455
cl_emit(&job->bcl, OCCLUSION_QUERY_COUNTER, counter) {
1456
if (cmd_buffer->state.query.active_query.bo) {
1457
counter.address =
1458
v3dv_cl_address(cmd_buffer->state.query.active_query.bo,
1459
cmd_buffer->state.query.active_query.offset);
1460
}
1461
}
1462
1463
cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_OCCLUSION_QUERY;
1464
}
1465
1466
static struct v3dv_job *
1467
cmd_buffer_subpass_split_for_barrier(struct v3dv_cmd_buffer *cmd_buffer,
1468
bool is_bcl_barrier)
1469
{
1470
assert(cmd_buffer->state.subpass_idx != -1);
1471
v3dv_cmd_buffer_finish_job(cmd_buffer);
1472
struct v3dv_job *job =
1473
v3dv_cmd_buffer_subpass_resume(cmd_buffer,
1474
cmd_buffer->state.subpass_idx);
1475
if (!job)
1476
return NULL;
1477
1478
job->serialize = true;
1479
job->needs_bcl_sync = is_bcl_barrier;
1480
return job;
1481
}
1482
1483
static void
1484
cmd_buffer_copy_secondary_end_query_state(struct v3dv_cmd_buffer *primary,
1485
struct v3dv_cmd_buffer *secondary)
1486
{
1487
struct v3dv_cmd_buffer_state *p_state = &primary->state;
1488
struct v3dv_cmd_buffer_state *s_state = &secondary->state;
1489
1490
const uint32_t total_state_count =
1491
p_state->query.end.used_count + s_state->query.end.used_count;
1492
v3dv_cmd_buffer_ensure_array_state(primary,
1493
sizeof(struct v3dv_end_query_cpu_job_info),
1494
total_state_count,
1495
&p_state->query.end.alloc_count,
1496
(void **) &p_state->query.end.states);
1497
v3dv_return_if_oom(primary, NULL);
1498
1499
for (uint32_t i = 0; i < s_state->query.end.used_count; i++) {
1500
const struct v3dv_end_query_cpu_job_info *s_qstate =
1501
&secondary->state.query.end.states[i];
1502
1503
struct v3dv_end_query_cpu_job_info *p_qstate =
1504
&p_state->query.end.states[p_state->query.end.used_count++];
1505
1506
p_qstate->pool = s_qstate->pool;
1507
p_qstate->query = s_qstate->query;
1508
}
1509
}
1510
1511
void
1512
v3dX(cmd_buffer_execute_inside_pass)(struct v3dv_cmd_buffer *primary,
1513
uint32_t cmd_buffer_count,
1514
const VkCommandBuffer *cmd_buffers)
1515
{
1516
assert(primary->state.job);
1517
1518
/* Emit occlusion query state if needed so the draw calls inside our
1519
* secondaries update the counters.
1520
*/
1521
bool has_occlusion_query =
1522
primary->state.dirty & V3DV_CMD_DIRTY_OCCLUSION_QUERY;
1523
if (has_occlusion_query)
1524
v3dX(cmd_buffer_emit_occlusion_query)(primary);
1525
1526
/* FIXME: if our primary job tiling doesn't enable MSAA but any of the
1527
* pipelines used by the secondaries do, we need to re-start the primary
1528
* job to enable MSAA. See cmd_buffer_restart_job_for_msaa_if_needed.
1529
*/
1530
bool pending_barrier = false;
1531
bool pending_bcl_barrier = false;
1532
for (uint32_t i = 0; i < cmd_buffer_count; i++) {
1533
V3DV_FROM_HANDLE(v3dv_cmd_buffer, secondary, cmd_buffers[i]);
1534
1535
assert(secondary->usage_flags &
1536
VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT);
1537
1538
list_for_each_entry(struct v3dv_job, secondary_job,
1539
&secondary->jobs, list_link) {
1540
if (secondary_job->type == V3DV_JOB_TYPE_GPU_CL_SECONDARY) {
1541
/* If the job is a CL, then we branch to it from the primary BCL.
1542
* In this case the secondary's BCL is finished with a
1543
* RETURN_FROM_SUB_LIST command to return back to the primary BCL
1544
* once we are done executing it.
1545
*/
1546
assert(v3dv_cl_offset(&secondary_job->rcl) == 0);
1547
assert(secondary_job->bcl.bo);
1548
1549
/* Sanity check that secondary BCL ends with RETURN_FROM_SUB_LIST */
1550
STATIC_ASSERT(cl_packet_length(RETURN_FROM_SUB_LIST) == 1);
1551
assert(v3dv_cl_offset(&secondary_job->bcl) >= 1);
1552
assert(*(((uint8_t *)secondary_job->bcl.next) - 1) ==
1553
V3DX(RETURN_FROM_SUB_LIST_opcode));
1554
1555
/* If this secondary has any barriers (or we had any pending barrier
1556
* to apply), then we can't just branch to it from the primary; we
1557
* need to split the primary to create a new job that can consume
1558
* the barriers first.
1559
*
1560
* FIXME: in this case, maybe just copy the secondary BCL without
1561
* the RETURN_FROM_SUB_LIST into the primary job to skip the
1562
* branch?
1563
*/
1564
struct v3dv_job *primary_job = primary->state.job;
1565
if (!primary_job || secondary_job->serialize || pending_barrier) {
1566
const bool needs_bcl_barrier =
1567
secondary_job->needs_bcl_sync || pending_bcl_barrier;
1568
primary_job =
1569
cmd_buffer_subpass_split_for_barrier(primary,
1570
needs_bcl_barrier);
1571
v3dv_return_if_oom(primary, NULL);
1572
1573
/* Since we have created a new primary we need to re-emit
1574
* occlusion query state.
1575
*/
1576
if (has_occlusion_query)
1577
v3dX(cmd_buffer_emit_occlusion_query)(primary);
1578
}
1579
1580
/* Make sure our primary job has all required BO references */
1581
set_foreach(secondary_job->bos, entry) {
1582
struct v3dv_bo *bo = (struct v3dv_bo *)entry->key;
1583
v3dv_job_add_bo(primary_job, bo);
1584
}
1585
1586
/* Emit required branch instructions. We expect each of these
1587
* to end with a corresponding 'return from sub list' item.
1588
*/
1589
list_for_each_entry(struct v3dv_bo, bcl_bo,
1590
&secondary_job->bcl.bo_list, list_link) {
1591
v3dv_cl_ensure_space_with_branch(&primary_job->bcl,
1592
cl_packet_length(BRANCH_TO_SUB_LIST));
1593
v3dv_return_if_oom(primary, NULL);
1594
cl_emit(&primary_job->bcl, BRANCH_TO_SUB_LIST, branch) {
1595
branch.address = v3dv_cl_address(bcl_bo, 0);
1596
}
1597
}
1598
1599
primary_job->tmu_dirty_rcl |= secondary_job->tmu_dirty_rcl;
1600
} else {
1601
/* This is a regular job (CPU or GPU), so just finish the current
1602
* primary job (if any) and then add the secondary job to the
1603
* primary's job list right after it.
1604
*/
1605
v3dv_cmd_buffer_finish_job(primary);
1606
v3dv_job_clone_in_cmd_buffer(secondary_job, primary);
1607
if (pending_barrier) {
1608
secondary_job->serialize = true;
1609
if (pending_bcl_barrier)
1610
secondary_job->needs_bcl_sync = true;
1611
}
1612
}
1613
1614
pending_barrier = false;
1615
pending_bcl_barrier = false;
1616
}
1617
1618
/* If the secondary has recorded any vkCmdEndQuery commands, we need to
1619
* copy this state to the primary so it is processed properly when the
1620
* current primary job is finished.
1621
*/
1622
cmd_buffer_copy_secondary_end_query_state(primary, secondary);
1623
1624
/* If this secondary had any pending barrier state we will need that
1625
* barrier state consumed with whatever comes next in the primary.
1626
*/
1627
assert(secondary->state.has_barrier || !secondary->state.has_bcl_barrier);
1628
pending_barrier = secondary->state.has_barrier;
1629
pending_bcl_barrier = secondary->state.has_bcl_barrier;
1630
}
1631
1632
if (pending_barrier) {
1633
primary->state.has_barrier = true;
1634
primary->state.has_bcl_barrier |= pending_bcl_barrier;
1635
}
1636
}
1637
1638
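/* Packs the geometry shader state record (bin and render mode variants) into
 * the job's indirect command list.
 */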
static void
emit_gs_shader_state_record(struct v3dv_job *job,
                            struct v3dv_bo *assembly_bo,
                            struct v3dv_shader_variant *gs_bin,
                            struct v3dv_cl_reloc gs_bin_uniforms,
                            struct v3dv_shader_variant *gs,
                            struct v3dv_cl_reloc gs_render_uniforms)
{
   cl_emit(&job->indirect, GEOMETRY_SHADER_STATE_RECORD, shader) {
      shader.geometry_bin_mode_shader_code_address =
         v3dv_cl_address(assembly_bo, gs_bin->assembly_offset);
      shader.geometry_bin_mode_shader_4_way_threadable =
         gs_bin->prog_data.gs->base.threads == 4;
      shader.geometry_bin_mode_shader_start_in_final_thread_section =
         gs_bin->prog_data.gs->base.single_seg;
      shader.geometry_bin_mode_shader_propagate_nans = true;
      shader.geometry_bin_mode_shader_uniforms_address =
         gs_bin_uniforms;

      shader.geometry_render_mode_shader_code_address =
         v3dv_cl_address(assembly_bo, gs->assembly_offset);
      shader.geometry_render_mode_shader_4_way_threadable =
         gs->prog_data.gs->base.threads == 4;
      shader.geometry_render_mode_shader_start_in_final_thread_section =
         gs->prog_data.gs->base.single_seg;
      shader.geometry_render_mode_shader_propagate_nans = true;
      shader.geometry_render_mode_shader_uniforms_address =
         gs_render_uniforms;
   }
}

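/* Translates a GS output primitive type (expressed as a GL enum) into the
 * hardware's geometry shader output format.
 */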
static uint8_t
v3d_gs_output_primitive(uint32_t prim_type)
{
   switch (prim_type) {
   case GL_POINTS:
      return GEOMETRY_SHADER_POINTS;
   case GL_LINE_STRIP:
      return GEOMETRY_SHADER_LINE_STRIP;
   case GL_TRIANGLE_STRIP:
      return GEOMETRY_SHADER_TRI_STRIP;
   default:
      unreachable("Unsupported primitive type");
   }
}

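/* Emits TESSELLATION_GEOMETRY_COMMON_PARAMS. The tessellation fields are
 * hard-coded to default values; only the geometry shader output format and
 * instance count vary.
 */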
static void
emit_tes_gs_common_params(struct v3dv_job *job,
                          uint8_t gs_out_prim_type,
                          uint8_t gs_num_invocations)
{
   cl_emit(&job->indirect, TESSELLATION_GEOMETRY_COMMON_PARAMS, shader) {
      shader.tessellation_type = TESSELLATION_TYPE_TRIANGLE;
      shader.tessellation_point_mode = false;
      shader.tessellation_edge_spacing = TESSELLATION_EDGE_SPACING_EVEN;
      shader.tessellation_clockwise = true;
      shader.tessellation_invocations = 1;

      shader.geometry_shader_output_format =
         v3d_gs_output_primitive(gs_out_prim_type);
      shader.geometry_shader_instances = gs_num_invocations & 0x1F;
   }
}

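/* Maps a SIMD dispatch width to the corresponding V3D_PACK_MODE_* value. */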
static uint8_t
simd_width_to_gs_pack_mode(uint32_t width)
{
   switch (width) {
   case 16:
      return V3D_PACK_MODE_16_WAY;
   case 8:
      return V3D_PACK_MODE_8_WAY;
   case 4:
      return V3D_PACK_MODE_4_WAY;
   case 1:
      return V3D_PACK_MODE_1_WAY;
   default:
      unreachable("Invalid SIMD width");
   };
}

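/* Emits TESSELLATION_GEOMETRY_SHADER_PARAMS. The TCS/TES fields are
 * hard-coded; the GS fields come from the pipeline's VPM configuration.
 */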
static void
emit_tes_gs_shader_params(struct v3dv_job *job,
                          uint32_t gs_simd,
                          uint32_t gs_vpm_output_size,
                          uint32_t gs_max_vpm_input_size_per_batch)
{
   cl_emit(&job->indirect, TESSELLATION_GEOMETRY_SHADER_PARAMS, shader) {
      shader.tcs_batch_flush_mode = V3D_TCS_FLUSH_MODE_FULLY_PACKED;
      shader.per_patch_data_column_depth = 1;
      shader.tcs_output_segment_size_in_sectors = 1;
      shader.tcs_output_segment_pack_mode = V3D_PACK_MODE_16_WAY;
      shader.tes_output_segment_size_in_sectors = 1;
      shader.tes_output_segment_pack_mode = V3D_PACK_MODE_16_WAY;
      shader.gs_output_segment_size_in_sectors = gs_vpm_output_size;
      shader.gs_output_segment_pack_mode =
         simd_width_to_gs_pack_mode(gs_simd);
      shader.tbg_max_patches_per_tcs_batch = 1;
      shader.tbg_max_extra_vertex_segs_for_patches_after_first = 0;
      shader.tbg_min_tcs_output_segments_required_in_play = 1;
      shader.tbg_min_per_patch_data_segments_required_in_play = 1;
      shader.tpg_max_patches_per_tes_batch = 1;
      shader.tpg_max_vertex_segments_per_tes_batch = 0;
      shader.tpg_max_tcs_output_segments_per_tes_batch = 1;
      shader.tpg_min_tes_output_segments_required_in_play = 1;
      shader.gbg_max_tes_output_vertex_segments_per_gs_batch =
         gs_max_vpm_input_size_per_batch;
      shader.gbg_min_gs_output_segments_required_in_play = 1;
   }
}

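/* Emits the GL shader state record and its attribute records into the job's
 * indirect list and points the BCL at them with GL_SHADER_STATE (or
 * GL_SHADER_STATE_INCLUDING_GS when the pipeline has a geometry shader).
 */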
void
v3dX(cmd_buffer_emit_gl_shader_state)(struct v3dv_cmd_buffer *cmd_buffer)
{
   struct v3dv_job *job = cmd_buffer->state.job;
   assert(job);

   struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
   struct v3dv_pipeline *pipeline = state->gfx.pipeline;
   assert(pipeline);

   struct v3dv_shader_variant *vs_variant =
      pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX];
   struct v3d_vs_prog_data *prog_data_vs = vs_variant->prog_data.vs;

   struct v3dv_shader_variant *vs_bin_variant =
      pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX_BIN];
   struct v3d_vs_prog_data *prog_data_vs_bin = vs_bin_variant->prog_data.vs;

   struct v3dv_shader_variant *fs_variant =
      pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
   struct v3d_fs_prog_data *prog_data_fs = fs_variant->prog_data.fs;

   struct v3dv_shader_variant *gs_variant = NULL;
   struct v3dv_shader_variant *gs_bin_variant = NULL;
   struct v3d_gs_prog_data *prog_data_gs = NULL;
   struct v3d_gs_prog_data *prog_data_gs_bin = NULL;
   if (pipeline->has_gs) {
      gs_variant =
         pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY];
      prog_data_gs = gs_variant->prog_data.gs;

      gs_bin_variant =
         pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY_BIN];
      prog_data_gs_bin = gs_bin_variant->prog_data.gs;
   }

   /* Update the cache dirty flag based on the shader progs data */
   job->tmu_dirty_rcl |= prog_data_vs_bin->base.tmu_dirty_rcl;
   job->tmu_dirty_rcl |= prog_data_vs->base.tmu_dirty_rcl;
   job->tmu_dirty_rcl |= prog_data_fs->base.tmu_dirty_rcl;
   if (pipeline->has_gs) {
      job->tmu_dirty_rcl |= prog_data_gs_bin->base.tmu_dirty_rcl;
      job->tmu_dirty_rcl |= prog_data_gs->base.tmu_dirty_rcl;
   }

   /* See GFXH-930 workaround below */
   uint32_t num_elements_to_emit = MAX2(pipeline->va_count, 1);

   uint32_t shader_state_record_length =
      cl_packet_length(GL_SHADER_STATE_RECORD);
   if (pipeline->has_gs) {
      shader_state_record_length +=
         cl_packet_length(GEOMETRY_SHADER_STATE_RECORD) +
         cl_packet_length(TESSELLATION_GEOMETRY_COMMON_PARAMS) +
         2 * cl_packet_length(TESSELLATION_GEOMETRY_SHADER_PARAMS);
   }

   uint32_t shader_rec_offset =
      v3dv_cl_ensure_space(&job->indirect,
                           shader_state_record_length +
                           num_elements_to_emit *
                           cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD),
                           32);
   v3dv_return_if_oom(cmd_buffer, NULL);

   struct v3dv_bo *assembly_bo = pipeline->shared_data->assembly_bo;

   if (pipeline->has_gs) {
      emit_gs_shader_state_record(job,
                                  assembly_bo,
                                  gs_bin_variant,
                                  cmd_buffer->state.uniforms.gs_bin,
                                  gs_variant,
                                  cmd_buffer->state.uniforms.gs);

      emit_tes_gs_common_params(job,
                                prog_data_gs->out_prim_type,
                                prog_data_gs->num_invocations);

      emit_tes_gs_shader_params(job,
                                pipeline->vpm_cfg_bin.gs_width,
                                pipeline->vpm_cfg_bin.Gd,
                                pipeline->vpm_cfg_bin.Gv);

      emit_tes_gs_shader_params(job,
                                pipeline->vpm_cfg.gs_width,
                                pipeline->vpm_cfg.Gd,
                                pipeline->vpm_cfg.Gv);
   }

   struct v3dv_bo *default_attribute_values =
      pipeline->default_attribute_values != NULL ?
      pipeline->default_attribute_values :
      pipeline->device->default_attribute_float;

   cl_emit_with_prepacked(&job->indirect, GL_SHADER_STATE_RECORD,
                          pipeline->shader_state_record, shader) {

      /* FIXME: we are setting these values both here and during prepacking.
       * This is because both cl_emit_with_prepacked and v3dvx_pack assert on
       * minimum values for them. It would be good to have v3dvx_pack assert
       * on the final value instead, if possible.
       */
      shader.min_coord_shader_input_segments_required_in_play =
         pipeline->vpm_cfg_bin.As;
      shader.min_vertex_shader_input_segments_required_in_play =
         pipeline->vpm_cfg.As;

      shader.coordinate_shader_code_address =
         v3dv_cl_address(assembly_bo, vs_bin_variant->assembly_offset);
      shader.vertex_shader_code_address =
         v3dv_cl_address(assembly_bo, vs_variant->assembly_offset);
      shader.fragment_shader_code_address =
         v3dv_cl_address(assembly_bo, fs_variant->assembly_offset);

      shader.coordinate_shader_uniforms_address = cmd_buffer->state.uniforms.vs_bin;
      shader.vertex_shader_uniforms_address = cmd_buffer->state.uniforms.vs;
      shader.fragment_shader_uniforms_address = cmd_buffer->state.uniforms.fs;

      shader.address_of_default_attribute_values =
         v3dv_cl_address(default_attribute_values, 0);

      shader.any_shader_reads_hardware_written_primitive_id =
         (pipeline->has_gs && prog_data_gs->uses_pid) || prog_data_fs->uses_pid;
      shader.insert_primitive_id_as_first_varying_to_fragment_shader =
         !pipeline->has_gs && prog_data_fs->uses_pid;
   }

   /* Upload vertex element attributes (SHADER_STATE_ATTRIBUTE_RECORD) */
   bool cs_loaded_any = false;
   const bool cs_uses_builtins = prog_data_vs_bin->uses_iid ||
                                 prog_data_vs_bin->uses_biid ||
                                 prog_data_vs_bin->uses_vid;
   const uint32_t packet_length =
      cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD);

   uint32_t emitted_va_count = 0;
   for (uint32_t i = 0; emitted_va_count < pipeline->va_count; i++) {
      assert(i < MAX_VERTEX_ATTRIBS);

      if (pipeline->va[i].vk_format == VK_FORMAT_UNDEFINED)
         continue;

      const uint32_t binding = pipeline->va[i].binding;

      /* We store each vertex attribute in the array using its driver location
       * as index.
       */
      const uint32_t location = i;

      struct v3dv_vertex_binding *c_vb = &cmd_buffer->state.vertex_bindings[binding];

      cl_emit_with_prepacked(&job->indirect, GL_SHADER_STATE_ATTRIBUTE_RECORD,
                             &pipeline->vertex_attrs[i * packet_length], attr) {

         assert(c_vb->buffer->mem->bo);
         attr.address = v3dv_cl_address(c_vb->buffer->mem->bo,
                                        c_vb->buffer->mem_offset +
                                        pipeline->va[i].offset +
                                        c_vb->offset);

         attr.number_of_values_read_by_coordinate_shader =
            prog_data_vs_bin->vattr_sizes[location];
         attr.number_of_values_read_by_vertex_shader =
            prog_data_vs->vattr_sizes[location];

         /* GFXH-930: At least one attribute must be enabled and read by CS
          * and VS. If we have attributes being consumed by the VS but not
          * the CS, then set up a dummy load of the last attribute into the
          * CS's VPM inputs. (Since CS is just dead-code-elimination compared
          * to VS, we can't have CS loading but not VS).
          *
          * GFXH-1602: first attribute must be active if using builtins.
          */
         if (prog_data_vs_bin->vattr_sizes[location])
            cs_loaded_any = true;

         if (i == 0 && cs_uses_builtins && !cs_loaded_any) {
            attr.number_of_values_read_by_coordinate_shader = 1;
            cs_loaded_any = true;
         } else if (i == pipeline->va_count - 1 && !cs_loaded_any) {
            attr.number_of_values_read_by_coordinate_shader = 1;
            cs_loaded_any = true;
         }

         attr.maximum_index = 0xffffff;
      }

      emitted_va_count++;
   }

   if (pipeline->va_count == 0) {
      /* GFXH-930: At least one attribute must be enabled and read
       * by CS and VS. If we have no attributes being consumed by
       * the shader, set up a dummy to be loaded into the VPM.
       */
      cl_emit(&job->indirect, GL_SHADER_STATE_ATTRIBUTE_RECORD, attr) {
         /* Valid address of data whose value will be unused. */
         attr.address = v3dv_cl_address(job->indirect.bo, 0);

         attr.type = ATTRIBUTE_FLOAT;
         attr.stride = 0;
         attr.vec_size = 1;

         attr.number_of_values_read_by_coordinate_shader = 1;
         attr.number_of_values_read_by_vertex_shader = 1;
      }
   }

   if (cmd_buffer->state.dirty & V3DV_CMD_DIRTY_PIPELINE) {
      v3dv_cl_ensure_space_with_branch(&job->bcl,
                                       sizeof(pipeline->vcm_cache_size));
      v3dv_return_if_oom(cmd_buffer, NULL);

      cl_emit_prepacked(&job->bcl, &pipeline->vcm_cache_size);
   }

   v3dv_cl_ensure_space_with_branch(&job->bcl,
                                    cl_packet_length(GL_SHADER_STATE));
   v3dv_return_if_oom(cmd_buffer, NULL);

   if (pipeline->has_gs) {
      cl_emit(&job->bcl, GL_SHADER_STATE_INCLUDING_GS, state) {
         state.address = v3dv_cl_address(job->indirect.bo, shader_rec_offset);
         state.number_of_attribute_arrays = num_elements_to_emit;
      }
   } else {
      cl_emit(&job->bcl, GL_SHADER_STATE, state) {
         state.address = v3dv_cl_address(job->indirect.bo, shader_rec_offset);
         state.number_of_attribute_arrays = num_elements_to_emit;
      }
   }

   cmd_buffer->state.dirty &= ~(V3DV_CMD_DIRTY_VERTEX_BUFFER |
                                V3DV_CMD_DIRTY_DESCRIPTOR_SETS |
                                V3DV_CMD_DIRTY_PUSH_CONSTANTS);
   cmd_buffer->state.dirty_descriptor_stages &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
   cmd_buffer->state.dirty_push_constants_stages &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
}

/* FIXME: C&P from v3dx_draw. Refactor to common place? */
static uint32_t
v3d_hw_prim_type(enum pipe_prim_type prim_type)
{
   switch (prim_type) {
   case PIPE_PRIM_POINTS:
   case PIPE_PRIM_LINES:
   case PIPE_PRIM_LINE_LOOP:
   case PIPE_PRIM_LINE_STRIP:
   case PIPE_PRIM_TRIANGLES:
   case PIPE_PRIM_TRIANGLE_STRIP:
   case PIPE_PRIM_TRIANGLE_FAN:
      return prim_type;

   case PIPE_PRIM_LINES_ADJACENCY:
   case PIPE_PRIM_LINE_STRIP_ADJACENCY:
   case PIPE_PRIM_TRIANGLES_ADJACENCY:
   case PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY:
      return 8 + (prim_type - PIPE_PRIM_LINES_ADJACENCY);

   default:
      unreachable("Unsupported primitive type");
   }
}

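/* Emits a non-indexed draw, selecting between instanced and non-instanced
 * vertex array primitive packets and emitting base instance state if needed.
 */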
void
v3dX(cmd_buffer_emit_draw)(struct v3dv_cmd_buffer *cmd_buffer,
                           struct v3dv_draw_info *info)
{
   struct v3dv_job *job = cmd_buffer->state.job;
   assert(job);

   struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
   struct v3dv_pipeline *pipeline = state->gfx.pipeline;

   assert(pipeline);

   uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);

   if (info->first_instance > 0) {
      v3dv_cl_ensure_space_with_branch(
         &job->bcl, cl_packet_length(BASE_VERTEX_BASE_INSTANCE));
      v3dv_return_if_oom(cmd_buffer, NULL);

      cl_emit(&job->bcl, BASE_VERTEX_BASE_INSTANCE, base) {
         base.base_instance = info->first_instance;
         base.base_vertex = 0;
      }
   }

   if (info->instance_count > 1) {
      v3dv_cl_ensure_space_with_branch(
         &job->bcl, cl_packet_length(VERTEX_ARRAY_INSTANCED_PRIMS));
      v3dv_return_if_oom(cmd_buffer, NULL);

      cl_emit(&job->bcl, VERTEX_ARRAY_INSTANCED_PRIMS, prim) {
         prim.mode = hw_prim_type;
         prim.index_of_first_vertex = info->first_vertex;
         prim.number_of_instances = info->instance_count;
         prim.instance_length = info->vertex_count;
      }
   } else {
      v3dv_cl_ensure_space_with_branch(
         &job->bcl, cl_packet_length(VERTEX_ARRAY_PRIMS));
      v3dv_return_if_oom(cmd_buffer, NULL);
      cl_emit(&job->bcl, VERTEX_ARRAY_PRIMS, prim) {
         prim.mode = hw_prim_type;
         prim.length = info->vertex_count;
         prim.index_of_first_vertex = info->first_vertex;
      }
   }
}

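/* Emits INDEX_BUFFER_SETUP for the currently bound index buffer, if any. */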
void
v3dX(cmd_buffer_emit_index_buffer)(struct v3dv_cmd_buffer *cmd_buffer)
{
   struct v3dv_job *job = cmd_buffer->state.job;
   assert(job);

   /* We flag all state as dirty when we create a new job so make sure we
    * have a valid index buffer before attempting to emit state for it.
    */
   struct v3dv_buffer *ibuffer =
      v3dv_buffer_from_handle(cmd_buffer->state.index_buffer.buffer);
   if (ibuffer) {
      v3dv_cl_ensure_space_with_branch(
         &job->bcl, cl_packet_length(INDEX_BUFFER_SETUP));
      v3dv_return_if_oom(cmd_buffer, NULL);

      const uint32_t offset = cmd_buffer->state.index_buffer.offset;
      cl_emit(&job->bcl, INDEX_BUFFER_SETUP, ib) {
         ib.address = v3dv_cl_address(ibuffer->mem->bo,
                                      ibuffer->mem_offset + offset);
         ib.size = ibuffer->mem->bo->size;
      }
   }

   cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_INDEX_BUFFER;
}

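/* Emits an indexed draw, selecting between single-instance and instanced
 * indexed primitive list packets and emitting base vertex/instance state
 * when a non-zero offset is used.
 */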
void
v3dX(cmd_buffer_emit_draw_indexed)(struct v3dv_cmd_buffer *cmd_buffer,
                                   uint32_t indexCount,
                                   uint32_t instanceCount,
                                   uint32_t firstIndex,
                                   int32_t vertexOffset,
                                   uint32_t firstInstance)
{
   v3dv_cmd_buffer_emit_pre_draw(cmd_buffer);

   struct v3dv_job *job = cmd_buffer->state.job;
   assert(job);

   const struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
   uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
   uint8_t index_type = ffs(cmd_buffer->state.index_buffer.index_size) - 1;
   uint32_t index_offset = firstIndex * cmd_buffer->state.index_buffer.index_size;

   if (vertexOffset != 0 || firstInstance != 0) {
      v3dv_cl_ensure_space_with_branch(
         &job->bcl, cl_packet_length(BASE_VERTEX_BASE_INSTANCE));
      v3dv_return_if_oom(cmd_buffer, NULL);

      cl_emit(&job->bcl, BASE_VERTEX_BASE_INSTANCE, base) {
         base.base_instance = firstInstance;
         base.base_vertex = vertexOffset;
      }
   }

   if (instanceCount == 1) {
      v3dv_cl_ensure_space_with_branch(
         &job->bcl, cl_packet_length(INDEXED_PRIM_LIST));
      v3dv_return_if_oom(cmd_buffer, NULL);

      cl_emit(&job->bcl, INDEXED_PRIM_LIST, prim) {
         prim.index_type = index_type;
         prim.length = indexCount;
         prim.index_offset = index_offset;
         prim.mode = hw_prim_type;
         prim.enable_primitive_restarts = pipeline->primitive_restart;
      }
   } else if (instanceCount > 1) {
      v3dv_cl_ensure_space_with_branch(
         &job->bcl, cl_packet_length(INDEXED_INSTANCED_PRIM_LIST));
      v3dv_return_if_oom(cmd_buffer, NULL);

      cl_emit(&job->bcl, INDEXED_INSTANCED_PRIM_LIST, prim) {
         prim.index_type = index_type;
         prim.index_offset = index_offset;
         prim.mode = hw_prim_type;
         prim.enable_primitive_restarts = pipeline->primitive_restart;
         prim.number_of_instances = instanceCount;
         prim.instance_length = indexCount;
      }
   }
}

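/* Emits an indirect non-indexed draw sourcing its draw records from the
 * given buffer.
 */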
void
v3dX(cmd_buffer_emit_draw_indirect)(struct v3dv_cmd_buffer *cmd_buffer,
                                    struct v3dv_buffer *buffer,
                                    VkDeviceSize offset,
                                    uint32_t drawCount,
                                    uint32_t stride)
{
   v3dv_cmd_buffer_emit_pre_draw(cmd_buffer);

   struct v3dv_job *job = cmd_buffer->state.job;
   assert(job);

   const struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
   uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);

   v3dv_cl_ensure_space_with_branch(
      &job->bcl, cl_packet_length(INDIRECT_VERTEX_ARRAY_INSTANCED_PRIMS));
   v3dv_return_if_oom(cmd_buffer, NULL);

   cl_emit(&job->bcl, INDIRECT_VERTEX_ARRAY_INSTANCED_PRIMS, prim) {
      prim.mode = hw_prim_type;
      prim.number_of_draw_indirect_array_records = drawCount;
      prim.stride_in_multiples_of_4_bytes = stride >> 2;
      prim.address = v3dv_cl_address(buffer->mem->bo,
                                     buffer->mem_offset + offset);
   }
}

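/* Emits an indirect indexed draw sourcing its draw records from the given
 * buffer.
 */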
void
v3dX(cmd_buffer_emit_indexed_indirect)(struct v3dv_cmd_buffer *cmd_buffer,
                                       struct v3dv_buffer *buffer,
                                       VkDeviceSize offset,
                                       uint32_t drawCount,
                                       uint32_t stride)
{
   v3dv_cmd_buffer_emit_pre_draw(cmd_buffer);

   struct v3dv_job *job = cmd_buffer->state.job;
   assert(job);

   const struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
   uint32_t hw_prim_type = v3d_hw_prim_type(pipeline->topology);
   uint8_t index_type = ffs(cmd_buffer->state.index_buffer.index_size) - 1;

   v3dv_cl_ensure_space_with_branch(
      &job->bcl, cl_packet_length(INDIRECT_INDEXED_INSTANCED_PRIM_LIST));
   v3dv_return_if_oom(cmd_buffer, NULL);

   cl_emit(&job->bcl, INDIRECT_INDEXED_INSTANCED_PRIM_LIST, prim) {
      prim.index_type = index_type;
      prim.mode = hw_prim_type;
      prim.enable_primitive_restarts = pipeline->primitive_restart;
      prim.number_of_draw_indirect_indexed_records = drawCount;
      prim.stride_in_multiples_of_4_bytes = stride >> 2;
      prim.address = v3dv_cl_address(buffer->mem->bo,
                                     buffer->mem_offset + offset);
   }
}

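/* Fills the internal bpp, type and clamp parameters for a color render
 * target of the current subpass, based on its attachment's image view.
 */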
void
v3dX(cmd_buffer_render_pass_setup_render_target)(struct v3dv_cmd_buffer *cmd_buffer,
                                                 int rt,
                                                 uint32_t *rt_bpp,
                                                 uint32_t *rt_type,
                                                 uint32_t *rt_clamp)
{
   const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;

   assert(state->subpass_idx < state->pass->subpass_count);
   const struct v3dv_subpass *subpass =
      &state->pass->subpasses[state->subpass_idx];

   if (rt >= subpass->color_count)
      return;

   struct v3dv_subpass_attachment *attachment = &subpass->color_attachments[rt];
   const uint32_t attachment_idx = attachment->attachment;
   if (attachment_idx == VK_ATTACHMENT_UNUSED)
      return;

   const struct v3dv_framebuffer *framebuffer = state->framebuffer;
   assert(attachment_idx < framebuffer->attachment_count);
   struct v3dv_image_view *iview = framebuffer->attachments[attachment_idx];
   assert(iview->aspects & VK_IMAGE_ASPECT_COLOR_BIT);

   *rt_bpp = iview->internal_bpp;
   *rt_type = iview->internal_type;
   if (vk_format_is_int(iview->vk_format))
      *rt_clamp = V3D_RENDER_TARGET_CLAMP_INT;
   else if (vk_format_is_srgb(iview->vk_format))
      *rt_clamp = V3D_RENDER_TARGET_CLAMP_NORM;
   else
      *rt_clamp = V3D_RENDER_TARGET_CLAMP_NONE;
}
