GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/intel/vulkan/genX_cmd_buffer.c
1
/*
2
* Copyright © 2015 Intel Corporation
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
* IN THE SOFTWARE.
22
*/
23
24
#include <assert.h>
25
#include <stdbool.h>
26
27
#include "anv_private.h"
28
#include "anv_measure.h"
29
#include "vk_format.h"
30
#include "vk_util.h"
31
#include "util/fast_idiv_by_const.h"
32
33
#include "common/intel_aux_map.h"
34
#include "common/intel_l3_config.h"
35
#include "genxml/gen_macros.h"
36
#include "genxml/genX_pack.h"
37
#include "genxml/gen_rt_pack.h"
38
39
#include "nir/nir_xfb_info.h"
40
41
/* We reserve:
42
* - GPR 14 for secondary command buffer returns
43
* - GPR 15 for conditional rendering
44
*/
45
#define MI_BUILDER_NUM_ALLOC_GPRS 14
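/* With this limit the MI builder only hands out GPR0..GPR13 as temporaries,
* keeping GPR14 and GPR15 free for the uses listed above.
*/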
46
#define __gen_get_batch_dwords anv_batch_emit_dwords
47
#define __gen_address_offset anv_address_add
48
#define __gen_get_batch_address(b, a) anv_batch_address(b, a)
49
#include "common/mi_builder.h"
50
51
static void genX(flush_pipeline_select)(struct anv_cmd_buffer *cmd_buffer,
52
uint32_t pipeline);
53
54
static enum anv_pipe_bits
55
convert_pc_to_bits(struct GENX(PIPE_CONTROL) *pc) {
56
enum anv_pipe_bits bits = 0;
57
bits |= (pc->DepthCacheFlushEnable) ? ANV_PIPE_DEPTH_CACHE_FLUSH_BIT : 0;
58
bits |= (pc->DCFlushEnable) ? ANV_PIPE_DATA_CACHE_FLUSH_BIT : 0;
59
#if GFX_VER >= 12
60
bits |= (pc->TileCacheFlushEnable) ? ANV_PIPE_TILE_CACHE_FLUSH_BIT : 0;
61
bits |= (pc->HDCPipelineFlushEnable) ? ANV_PIPE_HDC_PIPELINE_FLUSH_BIT : 0;
62
#endif
63
bits |= (pc->RenderTargetCacheFlushEnable) ? ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT : 0;
64
bits |= (pc->StateCacheInvalidationEnable) ? ANV_PIPE_STATE_CACHE_INVALIDATE_BIT : 0;
65
bits |= (pc->ConstantCacheInvalidationEnable) ? ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT : 0;
66
bits |= (pc->TextureCacheInvalidationEnable) ? ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT : 0;
67
bits |= (pc->InstructionCacheInvalidateEnable) ? ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT : 0;
68
bits |= (pc->StallAtPixelScoreboard) ? ANV_PIPE_STALL_AT_SCOREBOARD_BIT : 0;
69
bits |= (pc->DepthStallEnable) ? ANV_PIPE_DEPTH_STALL_BIT : 0;
70
bits |= (pc->CommandStreamerStallEnable) ? ANV_PIPE_CS_STALL_BIT : 0;
71
return bits;
72
}
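/* anv_debug_dump_pc() below logs the flush/invalidate bits of a PIPE_CONTROL
* we are about to emit, plus the emitting function, whenever
* DEBUG_PIPE_CONTROL is set in INTEL_DEBUG.
*/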
73
74
#define anv_debug_dump_pc(pc) \
75
if (unlikely(INTEL_DEBUG & DEBUG_PIPE_CONTROL)) { \
76
fputs("pc: emit PC=( ", stderr); \
77
anv_dump_pipe_bits(convert_pc_to_bits(&(pc))); \
78
fprintf(stderr, ") reason: %s\n", __FUNCTION__); \
79
}
80
81
void
82
genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
83
{
84
struct anv_device *device = cmd_buffer->device;
85
UNUSED const struct intel_device_info *devinfo = &device->info;
86
uint32_t mocs = isl_mocs(&device->isl_dev, 0, false);
87
88
/* If we are emitting a new state base address we probably need to re-emit
89
* binding tables.
90
*/
91
cmd_buffer->state.descriptors_dirty |= ~0;
92
93
/* Emit a render target cache flush.
94
*
95
* This isn't documented anywhere in the PRM. However, it seems to be
96
* necessary prior to changing the surface state base address. Without
97
* this, we get GPU hangs when using multi-level command buffers which
98
* clear depth, reset state base address, and then go render stuff.
99
*/
100
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
101
#if GFX_VER >= 12
102
pc.HDCPipelineFlushEnable = true;
103
#else
104
pc.DCFlushEnable = true;
105
#endif
106
pc.RenderTargetCacheFlushEnable = true;
107
pc.CommandStreamerStallEnable = true;
108
#if GFX_VER == 12
109
/* Wa_1606662791:
110
*
111
* Software must program PIPE_CONTROL command with "HDC Pipeline
112
* Flush" prior to programming of the below two non-pipeline state :
113
* * STATE_BASE_ADDRESS
114
* * 3DSTATE_BINDING_TABLE_POOL_ALLOC
115
*/
116
if (devinfo->revision == 0 /* A0 */)
117
pc.HDCPipelineFlushEnable = true;
118
#endif
119
anv_debug_dump_pc(pc);
120
}
121
122
#if GFX_VER == 12
123
/* Wa_1607854226:
124
*
125
* Workaround the non pipelined state not applying in MEDIA/GPGPU pipeline
126
* mode by putting the pipeline temporarily in 3D mode.
127
*/
128
uint32_t gfx12_wa_pipeline = cmd_buffer->state.current_pipeline;
129
genX(flush_pipeline_select_3d)(cmd_buffer);
130
#endif
131
132
anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
133
sba.GeneralStateBaseAddress = (struct anv_address) { NULL, 0 };
134
sba.GeneralStateMOCS = mocs;
135
sba.GeneralStateBaseAddressModifyEnable = true;
136
137
sba.StatelessDataPortAccessMOCS = mocs;
138
139
sba.SurfaceStateBaseAddress =
140
anv_cmd_buffer_surface_base_address(cmd_buffer);
141
sba.SurfaceStateMOCS = mocs;
142
sba.SurfaceStateBaseAddressModifyEnable = true;
143
144
sba.DynamicStateBaseAddress =
145
(struct anv_address) { device->dynamic_state_pool.block_pool.bo, 0 };
146
sba.DynamicStateMOCS = mocs;
147
sba.DynamicStateBaseAddressModifyEnable = true;
148
149
sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
150
sba.IndirectObjectMOCS = mocs;
151
sba.IndirectObjectBaseAddressModifyEnable = true;
152
153
sba.InstructionBaseAddress =
154
(struct anv_address) { device->instruction_state_pool.block_pool.bo, 0 };
155
sba.InstructionMOCS = mocs;
156
sba.InstructionBaseAddressModifyEnable = true;
157
158
# if (GFX_VER >= 8)
159
/* Broadwell requires that we specify a buffer size for a bunch of
160
* these fields. However, since we will be growing the BOs live, we
161
* just set them all to the maximum.
162
*/
163
sba.GeneralStateBufferSize = 0xfffff;
164
sba.IndirectObjectBufferSize = 0xfffff;
165
if (anv_use_softpin(device->physical)) {
166
/* With softpin, we use fixed addresses so we actually know how big
167
* our base addresses are.
168
*/
169
sba.DynamicStateBufferSize = DYNAMIC_STATE_POOL_SIZE / 4096;
170
sba.InstructionBufferSize = INSTRUCTION_STATE_POOL_SIZE / 4096;
171
} else {
172
sba.DynamicStateBufferSize = 0xfffff;
173
sba.InstructionBufferSize = 0xfffff;
174
}
175
sba.GeneralStateBufferSizeModifyEnable = true;
176
sba.IndirectObjectBufferSizeModifyEnable = true;
177
sba.DynamicStateBufferSizeModifyEnable = true;
178
sba.InstructionBuffersizeModifyEnable = true;
179
# else
180
/* On gfx7, we have upper bounds instead. According to the docs,
181
* setting an upper bound of zero means that no bounds checking is
182
* performed so, in theory, we should be able to leave them zero.
183
* However, border color is broken and the GPU bounds-checks anyway.
184
* To avoid this and other potential problems, we may as well set it
185
* for everything.
186
*/
187
sba.GeneralStateAccessUpperBound =
188
(struct anv_address) { .bo = NULL, .offset = 0xfffff000 };
189
sba.GeneralStateAccessUpperBoundModifyEnable = true;
190
sba.DynamicStateAccessUpperBound =
191
(struct anv_address) { .bo = NULL, .offset = 0xfffff000 };
192
sba.DynamicStateAccessUpperBoundModifyEnable = true;
193
sba.InstructionAccessUpperBound =
194
(struct anv_address) { .bo = NULL, .offset = 0xfffff000 };
195
sba.InstructionAccessUpperBoundModifyEnable = true;
196
# endif
197
# if (GFX_VER >= 9)
198
if (anv_use_softpin(device->physical)) {
199
sba.BindlessSurfaceStateBaseAddress = (struct anv_address) {
200
.bo = device->surface_state_pool.block_pool.bo,
201
.offset = 0,
202
};
203
sba.BindlessSurfaceStateSize = (1 << 20) - 1;
204
} else {
205
sba.BindlessSurfaceStateBaseAddress = ANV_NULL_ADDRESS;
206
sba.BindlessSurfaceStateSize = 0;
207
}
208
sba.BindlessSurfaceStateMOCS = mocs;
209
sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
210
# endif
211
# if (GFX_VER >= 10)
212
sba.BindlessSamplerStateBaseAddress = (struct anv_address) { NULL, 0 };
213
sba.BindlessSamplerStateMOCS = mocs;
214
sba.BindlessSamplerStateBaseAddressModifyEnable = true;
215
sba.BindlessSamplerStateBufferSize = 0;
216
# endif
217
}
218
219
#if GFX_VER == 12
220
/* Wa_1607854226:
221
*
222
* Put the pipeline back into its current mode.
223
*/
224
if (gfx12_wa_pipeline != UINT32_MAX)
225
genX(flush_pipeline_select)(cmd_buffer, gfx12_wa_pipeline);
226
#endif
227
228
/* After re-setting the surface state base address, we have to do some
229
* cache flushing so that the sampler engine will pick up the new
230
* SURFACE_STATE objects and binding tables. From the Broadwell PRM,
231
* Shared Function > 3D Sampler > State > State Caching (page 96):
232
*
233
* Coherency with system memory in the state cache, like the texture
234
* cache is handled partially by software. It is expected that the
235
* command stream or shader will issue Cache Flush operation or
236
* Cache_Flush sampler message to ensure that the L1 cache remains
237
* coherent with system memory.
238
*
239
* [...]
240
*
241
* Whenever the value of the Dynamic_State_Base_Addr,
242
* Surface_State_Base_Addr are altered, the L1 state cache must be
243
* invalidated to ensure the new surface or sampler state is fetched
244
* from system memory.
245
*
246
* The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
247
* which, according to the PIPE_CONTROL instruction documentation in the
248
* Broadwell PRM:
249
*
250
* Setting this bit is independent of any other bit in this packet.
251
* This bit controls the invalidation of the L1 and L2 state caches
252
* at the top of the pipe i.e. at the parsing time.
253
*
254
* Unfortunately, experimentation seems to indicate that state cache
255
* invalidation through a PIPE_CONTROL does nothing whatsoever with
256
* regard to surface state and binding tables. Instead, it seems that
257
* invalidating the texture cache is what is actually needed.
258
*
259
* XXX: As far as we have been able to determine through
260
* experimentation, flushing the texture cache appears to be
261
* sufficient. The theory here is that all of the sampling/rendering
262
* units cache the binding table in the texture cache. However, we have
263
* yet to be able to actually confirm this.
264
*/
265
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
266
pc.TextureCacheInvalidationEnable = true;
267
pc.ConstantCacheInvalidationEnable = true;
268
pc.StateCacheInvalidationEnable = true;
269
anv_debug_dump_pc(pc);
270
}
271
}
272
273
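/* Track the BO referenced by a surface state: with softpin we just record
* the BO reference, otherwise we add a relocation at the surface state's
* address field.
*/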
static void
274
add_surface_reloc(struct anv_cmd_buffer *cmd_buffer,
275
struct anv_state state, struct anv_address addr)
276
{
277
VkResult result;
278
279
if (anv_use_softpin(cmd_buffer->device->physical)) {
280
result = anv_reloc_list_add_bo(&cmd_buffer->surface_relocs,
281
&cmd_buffer->pool->alloc,
282
addr.bo);
283
} else {
284
const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
285
result = anv_reloc_list_add(&cmd_buffer->surface_relocs,
286
&cmd_buffer->pool->alloc,
287
state.offset + isl_dev->ss.addr_offset,
288
addr.bo, addr.offset, NULL);
289
}
290
291
if (unlikely(result != VK_SUCCESS))
292
anv_batch_set_error(&cmd_buffer->batch, result);
293
}
294
295
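/* Record the relocations for everything a surface state points at: the main
* surface address plus, when present, the aux and clear-color addresses.
*/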
static void
296
add_surface_state_relocs(struct anv_cmd_buffer *cmd_buffer,
297
struct anv_surface_state state)
298
{
299
const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
300
301
assert(!anv_address_is_null(state.address));
302
add_surface_reloc(cmd_buffer, state.state, state.address);
303
304
if (!anv_address_is_null(state.aux_address)) {
305
VkResult result =
306
anv_reloc_list_add(&cmd_buffer->surface_relocs,
307
&cmd_buffer->pool->alloc,
308
state.state.offset + isl_dev->ss.aux_addr_offset,
309
state.aux_address.bo,
310
state.aux_address.offset,
311
NULL);
312
if (result != VK_SUCCESS)
313
anv_batch_set_error(&cmd_buffer->batch, result);
314
}
315
316
if (!anv_address_is_null(state.clear_address)) {
317
VkResult result =
318
anv_reloc_list_add(&cmd_buffer->surface_relocs,
319
&cmd_buffer->pool->alloc,
320
state.state.offset +
321
isl_dev->ss.clear_color_state_offset,
322
state.clear_address.bo,
323
state.clear_address.offset,
324
NULL);
325
if (result != VK_SUCCESS)
326
anv_batch_set_error(&cmd_buffer->batch, result);
327
}
328
}
329
330
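/* Returns true if packing the clear color through the view's format and
* swizzle produces different bits than packing it through the surface
* format, i.e. a resolve would require a real format conversion.
*/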
static bool
331
isl_color_value_requires_conversion(union isl_color_value color,
332
const struct isl_surf *surf,
333
const struct isl_view *view)
334
{
335
if (surf->format == view->format && isl_swizzle_is_identity(view->swizzle))
336
return false;
337
338
uint32_t surf_pack[4] = { 0, 0, 0, 0 };
339
isl_color_value_pack(&color, surf->format, surf_pack);
340
341
uint32_t view_pack[4] = { 0, 0, 0, 0 };
342
union isl_color_value swiz_color =
343
isl_color_value_swizzle_inv(color, view->swizzle);
344
isl_color_value_pack(&swiz_color, view->format, view_pack);
345
346
return memcmp(surf_pack, view_pack, sizeof(surf_pack)) != 0;
347
}
348
349
static bool
350
anv_can_fast_clear_color_view(struct anv_device * device,
351
struct anv_image_view *iview,
352
VkImageLayout layout,
353
union isl_color_value clear_color,
354
uint32_t num_layers,
355
VkRect2D render_area)
356
{
357
if (iview->planes[0].isl.base_array_layer >=
358
anv_image_aux_layers(iview->image, VK_IMAGE_ASPECT_COLOR_BIT,
359
iview->planes[0].isl.base_level))
360
return false;
361
362
/* Start by getting the fast clear type. We use the first subpass
363
* layout here because we don't want to fast-clear if the first subpass
364
* to use the attachment can't handle fast-clears.
365
*/
366
enum anv_fast_clear_type fast_clear_type =
367
anv_layout_to_fast_clear_type(&device->info, iview->image,
368
VK_IMAGE_ASPECT_COLOR_BIT,
369
layout);
370
switch (fast_clear_type) {
371
case ANV_FAST_CLEAR_NONE:
372
return false;
373
case ANV_FAST_CLEAR_DEFAULT_VALUE:
374
if (!isl_color_value_is_zero(clear_color, iview->planes[0].isl.format))
375
return false;
376
break;
377
case ANV_FAST_CLEAR_ANY:
378
break;
379
}
380
381
/* Potentially, we could do partial fast-clears but doing so has crazy
382
* alignment restrictions. It's easier to just restrict to full size
383
* fast clears for now.
384
*/
385
if (render_area.offset.x != 0 ||
386
render_area.offset.y != 0 ||
387
render_area.extent.width != iview->extent.width ||
388
render_area.extent.height != iview->extent.height)
389
return false;
390
391
/* On Broadwell and earlier, we can only handle 0/1 clear colors */
392
if (GFX_VER <= 8 &&
393
!isl_color_value_is_zero_one(clear_color, iview->planes[0].isl.format))
394
return false;
395
396
/* If the clear color is one that would require non-trivial format
397
* conversion on resolve, we don't bother with the fast clear. This
398
* shouldn't be common as most clear colors are 0/1 and the most common
399
* format re-interpretation is for sRGB.
400
*/
401
if (isl_color_value_requires_conversion(clear_color,
402
&iview->image->planes[0].primary_surface.isl,
403
&iview->planes[0].isl)) {
404
anv_perf_warn(device, &iview->base,
405
"Cannot fast-clear to colors which would require "
406
"format conversion on resolve");
407
return false;
408
}
409
410
/* We only allow fast clears to the first slice of an image (level 0,
411
* layer 0) and only for the entire slice. This guarantees us that, at
412
* any given time, there is only one clear color on any given image.
413
* At the time of our testing (Jan 17, 2018), there
414
* were no known applications which would benefit from fast-clearing
415
* more than just the first slice.
416
*/
417
if (iview->planes[0].isl.base_level > 0 ||
418
iview->planes[0].isl.base_array_layer > 0) {
419
anv_perf_warn(device, &iview->image->base,
420
"Rendering with multi-lod or multi-layer framebuffer "
421
"with LOAD_OP_LOAD and baseMipLevel > 0 or "
422
"baseArrayLayer > 0. Not fast clearing.");
423
return false;
424
}
425
426
if (num_layers > 1) {
427
anv_perf_warn(device, &iview->image->base,
428
"Rendering to a multi-layer framebuffer with "
429
"LOAD_OP_CLEAR. Only fast-clearing the first slice");
430
}
431
432
return true;
433
}
434
435
static bool
436
anv_can_hiz_clear_ds_view(struct anv_device *device,
437
struct anv_image_view *iview,
438
VkImageLayout layout,
439
VkImageAspectFlags clear_aspects,
440
float depth_clear_value,
441
VkRect2D render_area)
442
{
443
/* We don't do any HiZ or depth fast-clears on gfx7 yet */
444
if (GFX_VER == 7)
445
return false;
446
447
/* If we're just clearing stencil, we can always HiZ clear */
448
if (!(clear_aspects & VK_IMAGE_ASPECT_DEPTH_BIT))
449
return true;
450
451
/* We must have depth in order to have HiZ */
452
if (!(iview->image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT))
453
return false;
454
455
const enum isl_aux_usage clear_aux_usage =
456
anv_layout_to_aux_usage(&device->info, iview->image,
457
VK_IMAGE_ASPECT_DEPTH_BIT,
458
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
459
layout);
460
if (!blorp_can_hiz_clear_depth(&device->info,
461
&iview->image->planes[0].primary_surface.isl,
462
clear_aux_usage,
463
iview->planes[0].isl.base_level,
464
iview->planes[0].isl.base_array_layer,
465
render_area.offset.x,
466
render_area.offset.y,
467
render_area.offset.x +
468
render_area.extent.width,
469
render_area.offset.y +
470
render_area.extent.height))
471
return false;
472
473
if (depth_clear_value != ANV_HZ_FC_VAL)
474
return false;
475
476
/* Only gfx9+ supports returning ANV_HZ_FC_VAL when sampling a fast-cleared
477
* portion of a HiZ buffer. Testing has revealed that Gfx8 only supports
478
* returning 0.0f. Gens prior to gfx8 do not support this feature at all.
479
*/
480
if (GFX_VER == 8 && anv_can_sample_with_hiz(&device->info, iview->image))
481
return false;
482
483
/* If we got here, then we can fast clear */
484
return true;
485
}
486
487
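/* Read x exactly once through a volatile lvalue so the compiler cannot
* elide or reuse a cached value; used below when inspecting live AUX-TT
* entries.
*/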
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
488
489
#if GFX_VER == 12
490
static void
491
anv_image_init_aux_tt(struct anv_cmd_buffer *cmd_buffer,
492
const struct anv_image *image,
493
VkImageAspectFlagBits aspect,
494
uint32_t base_level, uint32_t level_count,
495
uint32_t base_layer, uint32_t layer_count)
496
{
497
uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
498
499
const struct anv_surface *surface = &image->planes[plane].primary_surface;
500
uint64_t base_address =
501
anv_address_physical(anv_image_address(image, &surface->memory_range));
502
503
const struct isl_surf *isl_surf = &image->planes[plane].primary_surface.isl;
504
uint64_t format_bits = intel_aux_map_format_bits_for_isl_surf(isl_surf);
505
506
/* We're about to live-update the AUX-TT. We really don't want anyone else
507
* trying to read it while we're doing this. We could probably get away
508
* with not having this stall in some cases if we were really careful but
509
* it's better to play it safe. Full stall the GPU.
510
*/
511
anv_add_pending_pipe_bits(cmd_buffer,
512
ANV_PIPE_END_OF_PIPE_SYNC_BIT,
513
"before update AUX-TT");
514
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
515
516
struct mi_builder b;
517
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
518
519
for (uint32_t a = 0; a < layer_count; a++) {
520
const uint32_t layer = base_layer + a;
521
522
uint64_t start_offset_B = UINT64_MAX, end_offset_B = 0;
523
for (uint32_t l = 0; l < level_count; l++) {
524
const uint32_t level = base_level + l;
525
526
uint32_t logical_array_layer, logical_z_offset_px;
527
if (image->type == VK_IMAGE_TYPE_3D) {
528
logical_array_layer = 0;
529
530
/* If the given miplevel does not have this layer, then any higher
531
* miplevels won't either because miplevels only get smaller the
532
* higher the LOD.
533
*/
534
assert(layer < image->extent.depth);
535
if (layer >= anv_minify(image->extent.depth, level))
536
break;
537
logical_z_offset_px = layer;
538
} else {
539
assert(layer < image->array_size);
540
logical_array_layer = layer;
541
logical_z_offset_px = 0;
542
}
543
544
uint32_t slice_start_offset_B, slice_end_offset_B;
545
isl_surf_get_image_range_B_tile(isl_surf, level,
546
logical_array_layer,
547
logical_z_offset_px,
548
&slice_start_offset_B,
549
&slice_end_offset_B);
550
551
start_offset_B = MIN2(start_offset_B, slice_start_offset_B);
552
end_offset_B = MAX2(end_offset_B, slice_end_offset_B);
553
}
554
555
/* Aux operates 64K at a time */
556
start_offset_B = align_down_u64(start_offset_B, 64 * 1024);
557
end_offset_B = align_u64(end_offset_B, 64 * 1024);
558
559
for (uint64_t offset = start_offset_B;
560
offset < end_offset_B; offset += 64 * 1024) {
561
uint64_t address = base_address + offset;
562
563
uint64_t aux_entry_addr64, *aux_entry_map;
564
aux_entry_map = intel_aux_map_get_entry(cmd_buffer->device->aux_map_ctx,
565
address, &aux_entry_addr64);
566
567
assert(anv_use_softpin(cmd_buffer->device->physical));
568
struct anv_address aux_entry_address = {
569
.bo = NULL,
570
.offset = aux_entry_addr64,
571
};
572
573
const uint64_t old_aux_entry = READ_ONCE(*aux_entry_map);
574
uint64_t new_aux_entry =
575
(old_aux_entry & INTEL_AUX_MAP_ADDRESS_MASK) | format_bits;
576
577
if (isl_aux_usage_has_ccs(image->planes[plane].aux_usage))
578
new_aux_entry |= INTEL_AUX_MAP_ENTRY_VALID_BIT;
579
580
mi_store(&b, mi_mem64(aux_entry_address), mi_imm(new_aux_entry));
581
}
582
}
583
584
anv_add_pending_pipe_bits(cmd_buffer,
585
ANV_PIPE_AUX_TABLE_INVALIDATE_BIT,
586
"after update AUX-TT");
587
}
588
#endif /* GFX_VER == 12 */
589
590
/* Transitions a HiZ-enabled depth buffer from one layout to another. Unless
591
* the initial layout is undefined, the HiZ buffer and depth buffer will
592
* represent the same data at the end of this operation.
593
*/
594
static void
595
transition_depth_buffer(struct anv_cmd_buffer *cmd_buffer,
596
const struct anv_image *image,
597
uint32_t base_layer, uint32_t layer_count,
598
VkImageLayout initial_layout,
599
VkImageLayout final_layout,
600
bool will_full_fast_clear)
601
{
602
uint32_t depth_plane =
603
anv_image_aspect_to_plane(image->aspects, VK_IMAGE_ASPECT_DEPTH_BIT);
604
if (image->planes[depth_plane].aux_usage == ISL_AUX_USAGE_NONE)
605
return;
606
607
#if GFX_VER == 12
608
if ((initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
609
initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) &&
610
cmd_buffer->device->physical->has_implicit_ccs &&
611
cmd_buffer->device->info.has_aux_map) {
612
anv_image_init_aux_tt(cmd_buffer, image, VK_IMAGE_ASPECT_DEPTH_BIT,
613
0, 1, base_layer, layer_count);
614
}
615
#endif
616
617
/* If will_full_fast_clear is set, the caller promises to fast-clear the
618
* largest portion of the specified range as it can. For depth images,
619
* that means the entire image because we don't support multi-LOD HiZ.
620
*/
621
assert(image->planes[0].primary_surface.isl.levels == 1);
622
if (will_full_fast_clear)
623
return;
624
625
const enum isl_aux_state initial_state =
626
anv_layout_to_aux_state(&cmd_buffer->device->info, image,
627
VK_IMAGE_ASPECT_DEPTH_BIT,
628
initial_layout);
629
const enum isl_aux_state final_state =
630
anv_layout_to_aux_state(&cmd_buffer->device->info, image,
631
VK_IMAGE_ASPECT_DEPTH_BIT,
632
final_layout);
633
634
const bool initial_depth_valid =
635
isl_aux_state_has_valid_primary(initial_state);
636
const bool initial_hiz_valid =
637
isl_aux_state_has_valid_aux(initial_state);
638
const bool final_needs_depth =
639
isl_aux_state_has_valid_primary(final_state);
640
const bool final_needs_hiz =
641
isl_aux_state_has_valid_aux(final_state);
642
643
/* Getting into the pass-through state for Depth is tricky and involves
644
* both a resolve and an ambiguate. We don't handle that state right now
645
* as anv_layout_to_aux_state never returns it. Resolve/ambiguate will
646
* trigger depth clears which require tile cache flushes.
647
*/
648
assert(final_state != ISL_AUX_STATE_PASS_THROUGH);
649
650
if (final_needs_depth && !initial_depth_valid) {
651
assert(initial_hiz_valid);
652
anv_image_hiz_op(cmd_buffer, image, VK_IMAGE_ASPECT_DEPTH_BIT,
653
0, base_layer, layer_count, ISL_AUX_OP_FULL_RESOLVE);
654
anv_add_pending_pipe_bits(cmd_buffer,
655
ANV_PIPE_TILE_CACHE_FLUSH_BIT,
656
"after depth resolve");
657
} else if (final_needs_hiz && !initial_hiz_valid) {
658
assert(initial_depth_valid);
659
anv_image_hiz_op(cmd_buffer, image, VK_IMAGE_ASPECT_DEPTH_BIT,
660
0, base_layer, layer_count, ISL_AUX_OP_AMBIGUATE);
661
anv_add_pending_pipe_bits(cmd_buffer,
662
ANV_PIPE_TILE_CACHE_FLUSH_BIT,
663
"after hiz resolve");
664
}
665
}
666
667
static inline bool
668
vk_image_layout_stencil_write_optimal(VkImageLayout layout)
669
{
670
return layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
671
layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
672
layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR;
673
}
674
675
/* Transitions a stencil buffer from one layout to another. On gfx7 this may
676
* copy the stencil data into its texturable shadow surface; on gfx12 it may
677
* initialize the AUX-TT entries and the stencil compression state.
678
*/
679
static void
680
transition_stencil_buffer(struct anv_cmd_buffer *cmd_buffer,
681
const struct anv_image *image,
682
uint32_t base_level, uint32_t level_count,
683
uint32_t base_layer, uint32_t layer_count,
684
VkImageLayout initial_layout,
685
VkImageLayout final_layout,
686
bool will_full_fast_clear)
687
{
688
#if GFX_VER == 7
689
uint32_t plane = anv_image_aspect_to_plane(image->aspects,
690
VK_IMAGE_ASPECT_STENCIL_BIT);
691
692
/* On gfx7, we have to store a texturable version of the stencil buffer in
693
* a shadow whenever VK_IMAGE_USAGE_SAMPLED_BIT is set and copy back and
694
* forth at strategic points. Stencil writes are only allowed in the following
695
* layouts:
696
*
697
* - VK_IMAGE_LAYOUT_GENERAL
698
* - VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
699
* - VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
700
* - VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL
701
* - VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR
702
*
703
* For general, we have no nice opportunity to transition so we do the copy
704
* to the shadow unconditionally at the end of the subpass. For transfer
705
* destinations, we can update it as part of the transfer op. For the other
706
* layouts, we delay the copy until a transition into some other layout.
707
*/
708
if (anv_surface_is_valid(&image->planes[plane].shadow_surface) &&
709
vk_image_layout_stencil_write_optimal(initial_layout) &&
710
!vk_image_layout_stencil_write_optimal(final_layout)) {
711
anv_image_copy_to_shadow(cmd_buffer, image,
712
VK_IMAGE_ASPECT_STENCIL_BIT,
713
base_level, level_count,
714
base_layer, layer_count);
715
}
716
#elif GFX_VER == 12
717
uint32_t plane = anv_image_aspect_to_plane(image->aspects,
718
VK_IMAGE_ASPECT_STENCIL_BIT);
719
if (image->planes[plane].aux_usage == ISL_AUX_USAGE_NONE)
720
return;
721
722
if ((initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
723
initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) &&
724
cmd_buffer->device->physical->has_implicit_ccs &&
725
cmd_buffer->device->info.has_aux_map) {
726
anv_image_init_aux_tt(cmd_buffer, image, VK_IMAGE_ASPECT_STENCIL_BIT,
727
base_level, level_count, base_layer, layer_count);
728
729
/* If will_full_fast_clear is set, the caller promises to fast-clear the
730
* largest portion of the specified range as it can.
731
*/
732
if (will_full_fast_clear)
733
return;
734
735
for (uint32_t l = 0; l < level_count; l++) {
736
const uint32_t level = base_level + l;
737
const VkRect2D clear_rect = {
738
.offset.x = 0,
739
.offset.y = 0,
740
.extent.width = anv_minify(image->extent.width, level),
741
.extent.height = anv_minify(image->extent.height, level),
742
};
743
744
uint32_t aux_layers =
745
anv_image_aux_layers(image, VK_IMAGE_ASPECT_STENCIL_BIT, level);
746
uint32_t level_layer_count =
747
MIN2(layer_count, aux_layers - base_layer);
748
749
/* From Bspec's 3DSTATE_STENCIL_BUFFER_BODY > Stencil Compression
750
* Enable:
751
*
752
* "When enabled, Stencil Buffer needs to be initialized via
753
* stencil clear (HZ_OP) before any renderpass."
754
*/
755
anv_image_hiz_clear(cmd_buffer, image, VK_IMAGE_ASPECT_STENCIL_BIT,
756
level, base_layer, level_layer_count,
757
clear_rect, 0 /* Stencil clear value */);
758
}
759
}
760
#endif
761
}
762
763
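/* MMIO offsets of the command streamer registers used with MI_PREDICATE. */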
#define MI_PREDICATE_SRC0 0x2400
764
#define MI_PREDICATE_SRC1 0x2408
765
#define MI_PREDICATE_RESULT 0x2418
766
767
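/* Update the per-(level, layer) compression-tracking dwords stored alongside
* the image so that later predicated resolves know whether the range may
* contain compressed data. Only CCS_E images track this state.
*/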
static void
768
set_image_compressed_bit(struct anv_cmd_buffer *cmd_buffer,
769
const struct anv_image *image,
770
VkImageAspectFlagBits aspect,
771
uint32_t level,
772
uint32_t base_layer, uint32_t layer_count,
773
bool compressed)
774
{
775
uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
776
777
/* We only have compression tracking for CCS_E */
778
if (image->planes[plane].aux_usage != ISL_AUX_USAGE_CCS_E)
779
return;
780
781
for (uint32_t a = 0; a < layer_count; a++) {
782
uint32_t layer = base_layer + a;
783
anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) {
784
sdi.Address = anv_image_get_compression_state_addr(cmd_buffer->device,
785
image, aspect,
786
level, layer);
787
sdi.ImmediateData = compressed ? UINT32_MAX : 0;
788
}
789
}
790
}
791
792
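/* Record which kind of fast clear, if any, is currently applied to the
* image's first slice. The resolve predicates built later read this value
* back with MI commands.
*/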
static void
793
set_image_fast_clear_state(struct anv_cmd_buffer *cmd_buffer,
794
const struct anv_image *image,
795
VkImageAspectFlagBits aspect,
796
enum anv_fast_clear_type fast_clear)
797
{
798
anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) {
799
sdi.Address = anv_image_get_fast_clear_type_addr(cmd_buffer->device,
800
image, aspect);
801
sdi.ImmediateData = fast_clear;
802
}
803
804
/* Whenever we have fast-clear, we consider that slice to be compressed.
805
* This makes building predicates much easier.
806
*/
807
if (fast_clear != ANV_FAST_CLEAR_NONE)
808
set_image_compressed_bit(cmd_buffer, image, aspect, 0, 0, 1, true);
809
}
810
811
/* This is only really practical on haswell and above because it requires
812
* MI math in order to get it correct.
813
*/
814
#if GFX_VERx10 >= 75
815
static void
816
anv_cmd_compute_resolve_predicate(struct anv_cmd_buffer *cmd_buffer,
817
const struct anv_image *image,
818
VkImageAspectFlagBits aspect,
819
uint32_t level, uint32_t array_layer,
820
enum isl_aux_op resolve_op,
821
enum anv_fast_clear_type fast_clear_supported)
822
{
823
struct mi_builder b;
824
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
825
826
const struct mi_value fast_clear_type =
827
mi_mem32(anv_image_get_fast_clear_type_addr(cmd_buffer->device,
828
image, aspect));
829
830
if (resolve_op == ISL_AUX_OP_FULL_RESOLVE) {
831
/* In this case, we're doing a full resolve which means we want the
832
* resolve to happen if any compression (including fast-clears) is
833
* present.
834
*
835
* In order to simplify the logic a bit, we make the assumption that,
836
* if the first slice has been fast-cleared, it is also marked as
837
* compressed. See also set_image_fast_clear_state.
838
*/
839
const struct mi_value compression_state =
840
mi_mem32(anv_image_get_compression_state_addr(cmd_buffer->device,
841
image, aspect,
842
level, array_layer));
843
mi_store(&b, mi_reg64(MI_PREDICATE_SRC0), compression_state);
844
mi_store(&b, compression_state, mi_imm(0));
845
846
if (level == 0 && array_layer == 0) {
847
/* If the predicate is true, we want to write 0 to the fast clear type
848
* and, if it's false, leave it alone. We can do this by writing
849
*
850
* clear_type = clear_type & ~predicate;
851
*/
852
struct mi_value new_fast_clear_type =
853
mi_iand(&b, fast_clear_type,
854
mi_inot(&b, mi_reg64(MI_PREDICATE_SRC0)));
855
mi_store(&b, fast_clear_type, new_fast_clear_type);
856
}
857
} else if (level == 0 && array_layer == 0) {
858
/* In this case, we are doing a partial resolve to get rid of fast-clear
859
* colors. We don't care about the compression state but we do care
860
* about how much fast clear is allowed by the final layout.
861
*/
862
assert(resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE);
863
assert(fast_clear_supported < ANV_FAST_CLEAR_ANY);
864
865
/* We need to compute (fast_clear_supported < image->fast_clear) */
866
struct mi_value pred =
867
mi_ult(&b, mi_imm(fast_clear_supported), fast_clear_type);
868
mi_store(&b, mi_reg64(MI_PREDICATE_SRC0), mi_value_ref(&b, pred));
869
870
/* If the predicate is true, we want to write 0 to the fast clear type
871
* and, if it's false, leave it alone. We can do this by writing
872
*
873
* clear_type = clear_type & ~predicate;
874
*/
875
struct mi_value new_fast_clear_type =
876
mi_iand(&b, fast_clear_type, mi_inot(&b, pred));
877
mi_store(&b, fast_clear_type, new_fast_clear_type);
878
} else {
879
/* In this case, we're trying to do a partial resolve on a slice that
880
* doesn't have clear color. There's nothing to do.
881
*/
882
assert(resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE);
883
return;
884
}
885
886
/* Set src1 to 0 and use a != condition */
887
mi_store(&b, mi_reg64(MI_PREDICATE_SRC1), mi_imm(0));
888
889
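/* With the inverted load and an equal-to-zero compare, the predicate ends
* up set exactly when SRC0 was non-zero, so the predicated resolve emitted
* by our caller only runs when it is actually needed.
*/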
anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
890
mip.LoadOperation = LOAD_LOADINV;
891
mip.CombineOperation = COMBINE_SET;
892
mip.CompareOperation = COMPARE_SRCS_EQUAL;
893
}
894
}
895
#endif /* GFX_VERx10 >= 75 */
896
897
#if GFX_VER <= 8
898
static void
899
anv_cmd_simple_resolve_predicate(struct anv_cmd_buffer *cmd_buffer,
900
const struct anv_image *image,
901
VkImageAspectFlagBits aspect,
902
uint32_t level, uint32_t array_layer,
903
enum isl_aux_op resolve_op,
904
enum anv_fast_clear_type fast_clear_supported)
905
{
906
struct mi_builder b;
907
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
908
909
struct mi_value fast_clear_type_mem =
910
mi_mem32(anv_image_get_fast_clear_type_addr(cmd_buffer->device,
911
image, aspect));
912
913
/* This only works for partial resolves and only when the clear color is
914
* all or nothing. On the upside, this emits less command streamer code
915
* and works on Ivybridge and Bay Trail.
916
*/
917
assert(resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE);
918
assert(fast_clear_supported != ANV_FAST_CLEAR_ANY);
919
920
/* We don't support fast clears on anything other than the first slice. */
921
if (level > 0 || array_layer > 0)
922
return;
923
924
/* On gfx8, we don't have a concept of default clear colors because we
925
* can't sample from CCS surfaces. It's enough to just load the fast clear
926
* state into the predicate register.
927
*/
928
mi_store(&b, mi_reg64(MI_PREDICATE_SRC0), fast_clear_type_mem);
929
mi_store(&b, mi_reg64(MI_PREDICATE_SRC1), mi_imm(0));
930
mi_store(&b, fast_clear_type_mem, mi_imm(0));
931
932
anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
933
mip.LoadOperation = LOAD_LOADINV;
934
mip.CombineOperation = COMBINE_SET;
935
mip.CompareOperation = COMPARE_SRCS_EQUAL;
936
}
937
}
938
#endif /* GFX_VER <= 8 */
939
940
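/* Emit a predicated CCS resolve of a single (level, layer): set up the MI
* predicate from the tracked compression/fast-clear state, then let BLORP
* emit the resolve with predication enabled.
*/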
static void
941
anv_cmd_predicated_ccs_resolve(struct anv_cmd_buffer *cmd_buffer,
942
const struct anv_image *image,
943
enum isl_format format,
944
struct isl_swizzle swizzle,
945
VkImageAspectFlagBits aspect,
946
uint32_t level, uint32_t array_layer,
947
enum isl_aux_op resolve_op,
948
enum anv_fast_clear_type fast_clear_supported)
949
{
950
const uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
951
952
#if GFX_VER >= 9
953
anv_cmd_compute_resolve_predicate(cmd_buffer, image,
954
aspect, level, array_layer,
955
resolve_op, fast_clear_supported);
956
#else /* GFX_VER <= 8 */
957
anv_cmd_simple_resolve_predicate(cmd_buffer, image,
958
aspect, level, array_layer,
959
resolve_op, fast_clear_supported);
960
#endif
961
962
/* CCS_D only supports full resolves and BLORP will assert on us if we try
963
* to do a partial resolve on a CCS_D surface.
964
*/
965
if (resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE &&
966
image->planes[plane].aux_usage == ISL_AUX_USAGE_CCS_D)
967
resolve_op = ISL_AUX_OP_FULL_RESOLVE;
968
969
anv_image_ccs_op(cmd_buffer, image, format, swizzle, aspect,
970
level, array_layer, 1, resolve_op, NULL, true);
971
}
972
973
static void
974
anv_cmd_predicated_mcs_resolve(struct anv_cmd_buffer *cmd_buffer,
975
const struct anv_image *image,
976
enum isl_format format,
977
struct isl_swizzle swizzle,
978
VkImageAspectFlagBits aspect,
979
uint32_t array_layer,
980
enum isl_aux_op resolve_op,
981
enum anv_fast_clear_type fast_clear_supported)
982
{
983
assert(aspect == VK_IMAGE_ASPECT_COLOR_BIT);
984
assert(resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE);
985
986
#if GFX_VERx10 >= 75
987
anv_cmd_compute_resolve_predicate(cmd_buffer, image,
988
aspect, 0, array_layer,
989
resolve_op, fast_clear_supported);
990
991
anv_image_mcs_op(cmd_buffer, image, format, swizzle, aspect,
992
array_layer, 1, resolve_op, NULL, true);
993
#else
994
unreachable("MCS resolves are unsupported on Ivybridge and Bay Trail");
995
#endif
996
}
997
998
void
999
genX(cmd_buffer_mark_image_written)(struct anv_cmd_buffer *cmd_buffer,
1000
const struct anv_image *image,
1001
VkImageAspectFlagBits aspect,
1002
enum isl_aux_usage aux_usage,
1003
uint32_t level,
1004
uint32_t base_layer,
1005
uint32_t layer_count)
1006
{
1007
/* The aspect must be exactly one of the image aspects. */
1008
assert(util_bitcount(aspect) == 1 && (aspect & image->aspects));
1009
1010
/* The only compression types with more than just fast-clears are MCS,
1011
* CCS_E, and HiZ. With HiZ we just trust the layout and don't actually
1012
* track the current fast-clear and compression state. This leaves us
1013
* with just MCS and CCS_E.
1014
*/
1015
if (aux_usage != ISL_AUX_USAGE_CCS_E &&
1016
aux_usage != ISL_AUX_USAGE_MCS)
1017
return;
1018
1019
set_image_compressed_bit(cmd_buffer, image, aspect,
1020
level, base_layer, layer_count, true);
1021
}
1022
1023
static void
1024
init_fast_clear_color(struct anv_cmd_buffer *cmd_buffer,
1025
const struct anv_image *image,
1026
VkImageAspectFlagBits aspect)
1027
{
1028
assert(cmd_buffer && image);
1029
assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
1030
1031
set_image_fast_clear_state(cmd_buffer, image, aspect,
1032
ANV_FAST_CLEAR_NONE);
1033
1034
/* Initialize the struct fields that are accessed for fast-clears so that
1035
* the HW restrictions on the field values are satisfied.
1036
*/
1037
struct anv_address addr =
1038
anv_image_get_clear_color_addr(cmd_buffer->device, image, aspect);
1039
1040
if (GFX_VER >= 9) {
1041
const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
1042
const unsigned num_dwords = GFX_VER >= 10 ?
1043
isl_dev->ss.clear_color_state_size / 4 :
1044
isl_dev->ss.clear_value_size / 4;
1045
for (unsigned i = 0; i < num_dwords; i++) {
1046
anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) {
1047
sdi.Address = addr;
1048
sdi.Address.offset += i * 4;
1049
sdi.ImmediateData = 0;
1050
}
1051
}
1052
} else {
1053
anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) {
1054
sdi.Address = addr;
1055
if (GFX_VERx10 >= 75) {
1056
/* Pre-SKL, the dword containing the clear values also contains
1057
* other fields, so we need to initialize those fields to match the
1058
* values that would be in a color attachment.
1059
*/
1060
sdi.ImmediateData = ISL_CHANNEL_SELECT_RED << 25 |
1061
ISL_CHANNEL_SELECT_GREEN << 22 |
1062
ISL_CHANNEL_SELECT_BLUE << 19 |
1063
ISL_CHANNEL_SELECT_ALPHA << 16;
1064
} else if (GFX_VER == 7) {
1065
/* On IVB, the dword containing the clear values also contains
1066
* other fields that must be zero or can be zero.
1067
*/
1068
sdi.ImmediateData = 0;
1069
}
1070
}
1071
}
1072
}
1073
1074
/* Copy the fast-clear value dword(s) between a surface state object and an
1075
* image's fast clear state buffer.
1076
*/
1077
static void
1078
genX(copy_fast_clear_dwords)(struct anv_cmd_buffer *cmd_buffer,
1079
struct anv_state surface_state,
1080
const struct anv_image *image,
1081
VkImageAspectFlagBits aspect,
1082
bool copy_from_surface_state)
1083
{
1084
assert(cmd_buffer && image);
1085
assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
1086
1087
struct anv_address ss_clear_addr = {
1088
.bo = cmd_buffer->device->surface_state_pool.block_pool.bo,
1089
.offset = surface_state.offset +
1090
cmd_buffer->device->isl_dev.ss.clear_value_offset,
1091
};
1092
const struct anv_address entry_addr =
1093
anv_image_get_clear_color_addr(cmd_buffer->device, image, aspect);
1094
unsigned copy_size = cmd_buffer->device->isl_dev.ss.clear_value_size;
1095
1096
#if GFX_VER == 7
1097
/* On gfx7, the combination of commands used here (MI_LOAD_REGISTER_MEM
1098
* and MI_STORE_REGISTER_MEM) can cause GPU hangs if any rendering is
1099
* in-flight when they are issued even if the memory touched is not
1100
* currently active for rendering. The weird bit is that it is not the
1101
* MI_LOAD/STORE_REGISTER_MEM commands which hang but rather the in-flight
1102
* rendering hangs such that the next stalling command after the
1103
* MI_LOAD/STORE_REGISTER_MEM commands will catch the hang.
1104
*
1105
* It is unclear exactly why this hang occurs. Both MI commands come with
1106
* warnings about the 3D pipeline but that doesn't seem to fully explain
1107
* it. My (Jason's) best theory is that it has something to do with the
1108
* fact that we're using a GPU state register as our temporary and that
1109
* something with reading/writing it is causing problems.
1110
*
1111
* In order to work around this issue, we emit a PIPE_CONTROL with the
1112
* command streamer stall bit set.
1113
*/
1114
anv_add_pending_pipe_bits(cmd_buffer,
1115
ANV_PIPE_CS_STALL_BIT,
1116
"after copy_fast_clear_dwords. Avoid potential hang");
1117
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1118
#endif
1119
1120
struct mi_builder b;
1121
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
1122
1123
if (copy_from_surface_state) {
1124
mi_memcpy(&b, entry_addr, ss_clear_addr, copy_size);
1125
} else {
1126
mi_memcpy(&b, ss_clear_addr, entry_addr, copy_size);
1127
1128
/* Updating a surface state object may require that the state cache be
1129
* invalidated. From the SKL PRM, Shared Functions -> State -> State
1130
* Caching:
1131
*
1132
* Whenever the RENDER_SURFACE_STATE object in memory pointed to by
1133
* the Binding Table Pointer (BTP) and Binding Table Index (BTI) is
1134
* modified [...], the L1 state cache must be invalidated to ensure
1135
* the new surface or sampler state is fetched from system memory.
1136
*
1137
* In testing, SKL doesn't actually seem to need this, but HSW does.
1138
*/
1139
anv_add_pending_pipe_bits(cmd_buffer,
1140
ANV_PIPE_STATE_CACHE_INVALIDATE_BIT,
1141
"after copy_fast_clear_dwords surface state update");
1142
}
1143
}
1144
1145
/**
1146
* @brief Transitions a color buffer from one layout to another.
1147
*
1148
* See section 6.1.1. Image Layout Transitions of the Vulkan 1.0.50 spec for
1149
* more information.
1150
*
1151
* @param level_count VK_REMAINING_MIP_LEVELS isn't supported.
1152
* @param layer_count VK_REMAINING_ARRAY_LAYERS isn't supported. For 3D images,
1153
* this represents the maximum layers to transition at each
1154
* specified miplevel.
1155
*/
1156
static void
1157
transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
1158
const struct anv_image *image,
1159
VkImageAspectFlagBits aspect,
1160
const uint32_t base_level, uint32_t level_count,
1161
uint32_t base_layer, uint32_t layer_count,
1162
VkImageLayout initial_layout,
1163
VkImageLayout final_layout,
1164
uint64_t src_queue_family,
1165
uint64_t dst_queue_family,
1166
bool will_full_fast_clear)
1167
{
1168
struct anv_device *device = cmd_buffer->device;
1169
const struct intel_device_info *devinfo = &device->info;
1170
/* Validate the inputs. */
1171
assert(cmd_buffer);
1172
assert(image && image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
1173
/* These values aren't supported for simplicity's sake. */
1174
assert(level_count != VK_REMAINING_MIP_LEVELS &&
1175
layer_count != VK_REMAINING_ARRAY_LAYERS);
1176
/* Ensure the subresource range is valid. */
1177
UNUSED uint64_t last_level_num = base_level + level_count;
1178
const uint32_t max_depth = anv_minify(image->extent.depth, base_level);
1179
UNUSED const uint32_t image_layers = MAX2(image->array_size, max_depth);
1180
assert((uint64_t)base_layer + layer_count <= image_layers);
1181
assert(last_level_num <= image->levels);
1182
/* The spec disallows these final layouts. */
1183
assert(final_layout != VK_IMAGE_LAYOUT_UNDEFINED &&
1184
final_layout != VK_IMAGE_LAYOUT_PREINITIALIZED);
1185
const struct isl_drm_modifier_info *isl_mod_info =
1186
image->tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT
1187
? isl_drm_modifier_get_info(image->drm_format_mod)
1188
: NULL;
1189
1190
const bool src_queue_external =
1191
src_queue_family == VK_QUEUE_FAMILY_FOREIGN_EXT ||
1192
src_queue_family == VK_QUEUE_FAMILY_EXTERNAL;
1193
1194
const bool dst_queue_external =
1195
dst_queue_family == VK_QUEUE_FAMILY_FOREIGN_EXT ||
1196
dst_queue_family == VK_QUEUE_FAMILY_EXTERNAL;
1197
1198
/* Simultaneous acquire and release on external queues is illegal. */
1199
assert(!src_queue_external || !dst_queue_external);
1200
1201
/* Ownership transition on an external queue requires special action if the
1202
* image has a DRM format modifier because we store image data in
1203
* a driver-private bo which is inaccessible to the external queue.
1204
*/
1205
const bool mod_acquire =
1206
src_queue_external &&
1207
image->tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
1208
1209
const bool mod_release =
1210
dst_queue_external &&
1211
image->tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
1212
1213
if (initial_layout == final_layout &&
1214
!mod_acquire && !mod_release) {
1215
/* No work is needed. */
1216
return;
1217
}
1218
1219
uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
1220
1221
if (anv_surface_is_valid(&image->planes[plane].shadow_surface) &&
1222
final_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
1223
/* This surface is a linear compressed image with a tiled shadow surface
1224
* for texturing. The client is about to use it in READ_ONLY_OPTIMAL so
1225
* we need to ensure the shadow copy is up-to-date.
1226
*/
1227
assert(image->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT);
1228
assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
1229
assert(image->planes[plane].primary_surface.isl.tiling == ISL_TILING_LINEAR);
1230
assert(image->planes[plane].shadow_surface.isl.tiling != ISL_TILING_LINEAR);
1231
assert(isl_format_is_compressed(image->planes[plane].primary_surface.isl.format));
1232
assert(plane == 0);
1233
anv_image_copy_to_shadow(cmd_buffer, image,
1234
VK_IMAGE_ASPECT_COLOR_BIT,
1235
base_level, level_count,
1236
base_layer, layer_count);
1237
}
1238
1239
if (base_layer >= anv_image_aux_layers(image, aspect, base_level))
1240
return;
1241
1242
assert(image->planes[plane].primary_surface.isl.tiling != ISL_TILING_LINEAR);
1243
1244
/* The following layouts are equivalent for non-linear images. */
1245
const bool initial_layout_undefined =
1246
initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
1247
initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED;
1248
1249
bool must_init_fast_clear_state = false;
1250
bool must_init_aux_surface = false;
1251
1252
if (initial_layout_undefined) {
1253
/* The subresource may have been aliased and populated with arbitrary
1254
* data.
1255
*/
1256
must_init_fast_clear_state = true;
1257
must_init_aux_surface = true;
1258
} else if (mod_acquire) {
1259
/* The fast clear state lives in a driver-private bo, and therefore the
1260
* external/foreign queue is unaware of it.
1261
*
1262
* If this is the first time we are accessing the image, then the fast
1263
* clear state is uninitialized.
1264
*
1265
* If this is NOT the first time we are accessing the image, then the fast
1266
* clear state may still be valid and correct due to the resolve during
1267
* our most recent ownership release. However, we do not track the aux
1268
* state with MI stores, and therefore must assume the worst-case: that
1269
* this is the first time we are accessing the image.
1270
*/
1271
assert(image->planes[plane].fast_clear_memory_range.binding ==
1272
ANV_IMAGE_MEMORY_BINDING_PRIVATE);
1273
must_init_fast_clear_state = true;
1274
1275
if (image->planes[plane].aux_surface.memory_range.binding ==
1276
ANV_IMAGE_MEMORY_BINDING_PRIVATE) {
1277
assert(isl_mod_info->aux_usage == ISL_AUX_USAGE_NONE);
1278
1279
/* The aux surface, like the fast clear state, lives in
1280
* a driver-private bo. We must initialize the aux surface for the
1281
* same reasons we must initialize the fast clear state.
1282
*/
1283
must_init_aux_surface = true;
1284
} else {
1285
assert(isl_mod_info->aux_usage != ISL_AUX_USAGE_NONE);
1286
1287
/* The aux surface, unlike the fast clear state, lives in
1288
* application-visible VkDeviceMemory and is shared with the
1289
* external/foreign queue. Therefore, when we acquire ownership of the
1290
* image with a defined VkImageLayout, the aux surface is valid and has
1291
* the aux state required by the modifier.
1292
*/
1293
must_init_aux_surface = false;
1294
}
1295
}
1296
1297
#if GFX_VER == 12
1298
/* We do not yet support modifiers with aux on gen12. */
1299
assert(image->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT);
1300
1301
if (initial_layout_undefined) {
1302
if (device->physical->has_implicit_ccs && devinfo->has_aux_map) {
1303
anv_image_init_aux_tt(cmd_buffer, image, aspect,
1304
base_level, level_count,
1305
base_layer, layer_count);
1306
}
1307
}
1308
#else
1309
assert(!(device->physical->has_implicit_ccs && devinfo->has_aux_map));
1310
#endif
1311
1312
if (must_init_fast_clear_state) {
1313
if (base_level == 0 && base_layer == 0)
1314
init_fast_clear_color(cmd_buffer, image, aspect);
1315
}
1316
1317
if (must_init_aux_surface) {
1318
assert(must_init_fast_clear_state);
1319
1320
/* Initialize the aux buffers to enable correct rendering. In order to
1321
* ensure that things such as storage images work correctly, aux buffers
1322
* need to be initialized to valid data.
1323
*
1324
* Having an aux buffer with invalid data is a problem for two reasons:
1325
*
1326
* 1) Having an invalid value in the buffer can confuse the hardware.
1327
* For instance, with CCS_E on SKL, a two-bit CCS value of 2 is
1328
* invalid and leads to the hardware doing strange things. It
1329
* doesn't hang as far as we can tell but rendering corruption can
1330
* occur.
1331
*
1332
* 2) If this transition is into the GENERAL layout and we then use the
1333
* image as a storage image, then we must have the aux buffer in the
1334
* pass-through state so that, if we then go to texture from the
1335
* image, we get the results of our storage image writes and not the
1336
* fast clear color or other random data.
1337
*
1338
* For CCS both of the problems above are real demonstrable issues. In
1339
* that case, the only thing we can do is to perform an ambiguate to
1340
* transition the aux surface into the pass-through state.
1341
*
1342
* For MCS, (2) is never an issue because we don't support multisampled
1343
* storage images. In theory, issue (1) is a problem with MCS but we've
1344
* never seen it in the wild. For 4x and 16x, all bit patters could, in
1345
* theory, be interpreted as something but we don't know that all bit
1346
* patterns are actually valid. For 2x and 8x, you could easily end up
1347
* with the MCS referring to an invalid plane because not all bits of
1348
* the MCS value are actually used. Even though we've never seen issues
1349
* in the wild, it's best to play it safe and initialize the MCS. We
1350
* can use a fast-clear for MCS because we only ever touch from render
1351
* and texture (no image load store).
1352
*/
1353
if (image->samples == 1) {
1354
for (uint32_t l = 0; l < level_count; l++) {
1355
const uint32_t level = base_level + l;
1356
1357
uint32_t aux_layers = anv_image_aux_layers(image, aspect, level);
1358
if (base_layer >= aux_layers)
1359
break; /* We will only get fewer layers as level increases */
1360
uint32_t level_layer_count =
1361
MIN2(layer_count, aux_layers - base_layer);
1362
1363
/* If will_full_fast_clear is set, the caller promises to
1364
* fast-clear the largest portion of the specified range as it can.
1365
* For color images, that means only the first LOD and array slice.
1366
*/
1367
if (level == 0 && base_layer == 0 && will_full_fast_clear) {
1368
base_layer++;
1369
level_layer_count--;
1370
if (level_layer_count == 0)
1371
continue;
1372
}
1373
1374
anv_image_ccs_op(cmd_buffer, image,
1375
image->planes[plane].primary_surface.isl.format,
1376
ISL_SWIZZLE_IDENTITY,
1377
aspect, level, base_layer, level_layer_count,
1378
ISL_AUX_OP_AMBIGUATE, NULL, false);
1379
1380
if (image->planes[plane].aux_usage == ISL_AUX_USAGE_CCS_E) {
1381
set_image_compressed_bit(cmd_buffer, image, aspect,
1382
level, base_layer, level_layer_count,
1383
false);
1384
}
1385
}
1386
} else {
1387
if (image->samples == 4 || image->samples == 16) {
1388
anv_perf_warn(cmd_buffer->device, &image->base,
1389
"Doing a potentially unnecessary fast-clear to "
1390
"define an MCS buffer.");
1391
}
1392
1393
/* If will_full_fast_clear is set, the caller promises to fast-clear
1394
* the largest portion of the specified range as it can.
1395
*/
1396
if (will_full_fast_clear)
1397
return;
1398
1399
assert(base_level == 0 && level_count == 1);
1400
anv_image_mcs_op(cmd_buffer, image,
1401
image->planes[plane].primary_surface.isl.format,
1402
ISL_SWIZZLE_IDENTITY,
1403
aspect, base_layer, layer_count,
1404
ISL_AUX_OP_FAST_CLEAR, NULL, false);
1405
}
1406
return;
1407
}
1408
1409
enum isl_aux_usage initial_aux_usage =
1410
anv_layout_to_aux_usage(devinfo, image, aspect, 0, initial_layout);
1411
enum isl_aux_usage final_aux_usage =
1412
anv_layout_to_aux_usage(devinfo, image, aspect, 0, final_layout);
1413
1414
/* We must override the anv_layout_to_* functions because they are unaware of
1415
* acquire/release direction.
1416
*/
1417
if (mod_acquire) {
1418
initial_aux_usage = isl_mod_info->aux_usage;
1419
} else if (mod_release) {
1420
final_aux_usage = isl_mod_info->aux_usage;
1421
}
1422
1423
/* The current code assumes that there is no mixing of CCS_E and CCS_D.
1424
* We can handle transitions of CCS_D/E to and from NONE. What we
1425
* don't yet handle is switching between CCS_E and CCS_D within a given
1426
* image. Doing so in a performant way requires more detailed aux state
1427
* tracking such as what is done in i965. For now, just assume that we
1428
* only have one type of compression.
1429
*/
1430
assert(initial_aux_usage == ISL_AUX_USAGE_NONE ||
1431
final_aux_usage == ISL_AUX_USAGE_NONE ||
1432
initial_aux_usage == final_aux_usage);
1433
1434
/* If initial aux usage is NONE, there is nothing to resolve */
1435
if (initial_aux_usage == ISL_AUX_USAGE_NONE)
1436
return;
1437
1438
enum isl_aux_op resolve_op = ISL_AUX_OP_NONE;
1439
1440
/* If the initial layout supports more fast clear than the final layout
1441
* then we need at least a partial resolve.
1442
*/
1443
const enum anv_fast_clear_type initial_fast_clear =
1444
anv_layout_to_fast_clear_type(devinfo, image, aspect, initial_layout);
1445
const enum anv_fast_clear_type final_fast_clear =
1446
anv_layout_to_fast_clear_type(devinfo, image, aspect, final_layout);
1447
if (final_fast_clear < initial_fast_clear)
1448
resolve_op = ISL_AUX_OP_PARTIAL_RESOLVE;
1449
1450
if (initial_aux_usage == ISL_AUX_USAGE_CCS_E &&
1451
final_aux_usage != ISL_AUX_USAGE_CCS_E)
1452
resolve_op = ISL_AUX_OP_FULL_RESOLVE;
1453
1454
if (resolve_op == ISL_AUX_OP_NONE)
1455
return;
1456
1457
/* Perform a resolve to synchronize data between the main and aux buffer.
1458
* Before we begin, we must satisfy the cache flushing requirement specified
1459
* in the Sky Lake PRM Vol. 7, "MCS Buffer for Render Target(s)":
1460
*
1461
* Any transition from any value in {Clear, Render, Resolve} to a
1462
* different value in {Clear, Render, Resolve} requires end of pipe
1463
* synchronization.
1464
*
1465
* We perform a flush of the write cache before and after the clear and
1466
* resolve operations to meet this requirement.
1467
*
1468
* Unlike other drawing, fast clear operations are not properly
1469
* synchronized. The first PIPE_CONTROL here likely ensures that the
1470
* contents of the previous render or clear hit the render target before we
1471
* resolve and the second likely ensures that the resolve is complete before
1472
* we do any more rendering or clearing.
1473
*/
1474
anv_add_pending_pipe_bits(cmd_buffer,
1475
ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
1476
ANV_PIPE_END_OF_PIPE_SYNC_BIT,
1477
"after transition RT");
1478
1479
for (uint32_t l = 0; l < level_count; l++) {
1480
uint32_t level = base_level + l;
1481
1482
uint32_t aux_layers = anv_image_aux_layers(image, aspect, level);
1483
if (base_layer >= aux_layers)
1484
break; /* We will only get fewer layers as level increases */
1485
uint32_t level_layer_count =
1486
MIN2(layer_count, aux_layers - base_layer);
1487
1488
for (uint32_t a = 0; a < level_layer_count; a++) {
1489
uint32_t array_layer = base_layer + a;
1490
1491
/* If will_full_fast_clear is set, the caller promises to fast-clear
1492
* the largest portion of the specified range that it can. For color
1493
* images, that means only the first LOD and array slice.
1494
*/
1495
if (level == 0 && array_layer == 0 && will_full_fast_clear)
1496
continue;
1497
1498
if (image->samples == 1) {
1499
anv_cmd_predicated_ccs_resolve(cmd_buffer, image,
1500
image->planes[plane].primary_surface.isl.format,
1501
ISL_SWIZZLE_IDENTITY,
1502
aspect, level, array_layer, resolve_op,
1503
final_fast_clear);
1504
} else {
1505
/* We only support fast-clear on the first layer so partial
1506
* resolves should not be used on other layers as they will use
1507
* the clear color stored in memory that is only valid for layer0.
1508
*/
1509
if (resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE &&
1510
array_layer != 0)
1511
continue;
1512
1513
anv_cmd_predicated_mcs_resolve(cmd_buffer, image,
1514
image->planes[plane].primary_surface.isl.format,
1515
ISL_SWIZZLE_IDENTITY,
1516
aspect, array_layer, resolve_op,
1517
final_fast_clear);
1518
}
1519
}
1520
}
1521
1522
anv_add_pending_pipe_bits(cmd_buffer,
1523
ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
1524
ANV_PIPE_END_OF_PIPE_SYNC_BIT,
1525
"after transition RT");
1526
}
1527
1528
static VkResult
1529
genX(cmd_buffer_setup_attachments)(struct anv_cmd_buffer *cmd_buffer,
1530
const struct anv_render_pass *pass,
1531
const struct anv_framebuffer *framebuffer,
1532
const VkRenderPassBeginInfo *begin)
1533
{
1534
struct anv_cmd_state *state = &cmd_buffer->state;
1535
1536
vk_free(&cmd_buffer->pool->alloc, state->attachments);
1537
1538
if (pass->attachment_count > 0) {
1539
state->attachments = vk_zalloc(&cmd_buffer->pool->alloc,
1540
pass->attachment_count *
1541
sizeof(state->attachments[0]),
1542
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1543
if (state->attachments == NULL) {
1544
/* Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
1545
return anv_batch_set_error(&cmd_buffer->batch,
1546
VK_ERROR_OUT_OF_HOST_MEMORY);
1547
}
1548
} else {
1549
state->attachments = NULL;
1550
}
1551
1552
const VkRenderPassAttachmentBeginInfoKHR *attach_begin =
1553
vk_find_struct_const(begin, RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR);
1554
if (begin && !attach_begin)
1555
assert(pass->attachment_count == framebuffer->attachment_count);
1556
1557
for (uint32_t i = 0; i < pass->attachment_count; ++i) {
1558
if (attach_begin && attach_begin->attachmentCount != 0) {
1559
assert(attach_begin->attachmentCount == pass->attachment_count);
1560
ANV_FROM_HANDLE(anv_image_view, iview, attach_begin->pAttachments[i]);
1561
state->attachments[i].image_view = iview;
1562
} else if (framebuffer && i < framebuffer->attachment_count) {
1563
state->attachments[i].image_view = framebuffer->attachments[i];
1564
} else {
1565
state->attachments[i].image_view = NULL;
1566
}
1567
}
1568
1569
if (begin) {
1570
for (uint32_t i = 0; i < pass->attachment_count; ++i) {
1571
const struct anv_render_pass_attachment *pass_att = &pass->attachments[i];
1572
struct anv_attachment_state *att_state = &state->attachments[i];
1573
VkImageAspectFlags att_aspects = vk_format_aspects(pass_att->format);
1574
VkImageAspectFlags clear_aspects = 0;
1575
VkImageAspectFlags load_aspects = 0;
1576
1577
if (att_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
1578
/* color attachment */
1579
if (pass_att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1580
clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
1581
} else if (pass_att->load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
1582
load_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
1583
}
1584
} else {
1585
/* depthstencil attachment */
1586
if (att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
1587
if (pass_att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1588
clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
1589
} else if (pass_att->load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
1590
load_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
1591
}
1592
}
1593
if (att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
1594
if (pass_att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1595
clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
1596
} else if (pass_att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
1597
load_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
1598
}
1599
}
1600
}
1601
1602
att_state->current_layout = pass_att->initial_layout;
1603
att_state->current_stencil_layout = pass_att->stencil_initial_layout;
1604
att_state->pending_clear_aspects = clear_aspects;
1605
att_state->pending_load_aspects = load_aspects;
1606
if (clear_aspects)
1607
att_state->clear_value = begin->pClearValues[i];
1608
1609
struct anv_image_view *iview = state->attachments[i].image_view;
1610
anv_assert(iview->vk_format == pass_att->format);
1611
1612
const uint32_t num_layers = iview->planes[0].isl.array_len;
1613
att_state->pending_clear_views = (1 << num_layers) - 1;
1614
1615
/* This will be initialized after the first subpass transition. */
1616
att_state->aux_usage = ISL_AUX_USAGE_NONE;
1617
1618
att_state->fast_clear = false;
1619
if (clear_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
1620
assert(clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT);
1621
att_state->fast_clear =
1622
anv_can_fast_clear_color_view(cmd_buffer->device, iview,
1623
pass_att->first_subpass_layout,
1624
vk_to_isl_color(att_state->clear_value.color),
1625
framebuffer->layers,
1626
begin->renderArea);
1627
} else if (clear_aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
1628
VK_IMAGE_ASPECT_STENCIL_BIT)) {
1629
att_state->fast_clear =
1630
anv_can_hiz_clear_ds_view(cmd_buffer->device, iview,
1631
pass_att->first_subpass_layout,
1632
clear_aspects,
1633
att_state->clear_value.depthStencil.depth,
1634
begin->renderArea);
1635
}
1636
}
1637
}
1638
1639
return VK_SUCCESS;
1640
}
1641
1642
/**
1643
* Allocate per-subpass attachment surface states (color/input), plus one
* for the null surface state, for vkCmdBeginRenderPass.
1644
*/
1645
static VkResult
1646
genX(cmd_buffer_alloc_att_surf_states)(struct anv_cmd_buffer *cmd_buffer,
1647
const struct anv_render_pass *pass,
1648
const struct anv_subpass *subpass)
1649
{
1650
const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
1651
struct anv_cmd_state *state = &cmd_buffer->state;
1652
1653
/* Reserve one for the NULL state. */
1654
unsigned num_states = 1;
1655
for (uint32_t i = 0; i < subpass->attachment_count; i++) {
1656
uint32_t att = subpass->attachments[i].attachment;
1657
if (att == VK_ATTACHMENT_UNUSED)
1658
continue;
1659
1660
assert(att < pass->attachment_count);
1661
if (!vk_format_is_color(pass->attachments[att].format))
1662
continue;
1663
1664
const VkImageUsageFlagBits att_usage = subpass->attachments[i].usage;
1665
assert(util_bitcount(att_usage) == 1);
1666
1667
if (att_usage == VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT ||
1668
att_usage == VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)
1669
num_states++;
1670
}
1671
1672
const uint32_t ss_stride = align_u32(isl_dev->ss.size, isl_dev->ss.align);
1673
state->attachment_states =
1674
anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
1675
num_states * ss_stride, isl_dev->ss.align);
1676
if (state->attachment_states.map == NULL) {
1677
return anv_batch_set_error(&cmd_buffer->batch,
1678
VK_ERROR_OUT_OF_DEVICE_MEMORY);
1679
}
1680
1681
struct anv_state next_state = state->attachment_states;
1682
next_state.alloc_size = isl_dev->ss.size;
1683
1684
state->null_surface_state = next_state;
1685
next_state.offset += ss_stride;
1686
next_state.map += ss_stride;
1687
1688
for (uint32_t i = 0; i < subpass->attachment_count; i++) {
1689
uint32_t att = subpass->attachments[i].attachment;
1690
if (att == VK_ATTACHMENT_UNUSED)
1691
continue;
1692
1693
assert(att < pass->attachment_count);
1694
if (!vk_format_is_color(pass->attachments[att].format))
1695
continue;
1696
1697
const VkImageUsageFlagBits att_usage = subpass->attachments[i].usage;
1698
assert(util_bitcount(att_usage) == 1);
1699
1700
if (att_usage == VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)
1701
state->attachments[att].color.state = next_state;
1702
else if (att_usage == VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)
1703
state->attachments[att].input.state = next_state;
1704
else
1705
continue;
1706
1707
1708
next_state.offset += ss_stride;
1709
next_state.map += ss_stride;
1710
}
1711
1712
assert(next_state.offset == state->attachment_states.offset +
1713
state->attachment_states.alloc_size);
1714
1715
return VK_SUCCESS;
1716
}
1717
1718
VkResult
1719
genX(BeginCommandBuffer)(
1720
VkCommandBuffer commandBuffer,
1721
const VkCommandBufferBeginInfo* pBeginInfo)
1722
{
1723
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1724
1725
/* If this is the first vkBeginCommandBuffer, we must *initialize* the
1726
* command buffer's state. Otherwise, we must *reset* its state. In both
1727
* cases we reset it.
1728
*
1729
* From the Vulkan 1.0 spec:
1730
*
1731
* If a command buffer is in the executable state and the command buffer
1732
* was allocated from a command pool with the
1733
* VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
1734
* vkBeginCommandBuffer implicitly resets the command buffer, behaving
1735
* as if vkResetCommandBuffer had been called with
1736
* VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
1737
* the command buffer in the recording state.
1738
*/
1739
anv_cmd_buffer_reset(cmd_buffer);
1740
1741
cmd_buffer->usage_flags = pBeginInfo->flags;
1742
1743
/* VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT must be ignored for
1744
* primary level command buffers.
1745
*
1746
* From the Vulkan 1.0 spec:
1747
*
1748
* VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT specifies that a
1749
* secondary command buffer is considered to be entirely inside a render
1750
* pass. If this is a primary command buffer, then this bit is ignored.
1751
*/
1752
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
1753
cmd_buffer->usage_flags &= ~VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
1754
1755
genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
1756
1757
/* We sometimes store vertex data in the dynamic state buffer for blorp
1758
* operations and our dynamic state stream may re-use data from previous
1759
* command buffers. In order to prevent stale cache data, we flush the VF
1760
* cache. We could do this on every blorp call but that's not really
1761
* needed as all of the data will get written by the CPU prior to the GPU
1762
* executing anything. The chances are fairly high that they will use
1763
* blorp at least once per primary command buffer so it shouldn't be
1764
* wasted.
1765
*
1766
* There is also a workaround on gfx8 which requires us to invalidate the
1767
* VF cache occasionally. It's easier if we can assume we start with a
1768
* fresh cache (See also genX(cmd_buffer_set_binding_for_gfx8_vb_flush).)
1769
*/
1770
anv_add_pending_pipe_bits(cmd_buffer,
1771
ANV_PIPE_VF_CACHE_INVALIDATE_BIT,
1772
"new cmd buffer");
1773
1774
/* Re-emit the aux table register in every command buffer. This way we're
1775
* ensured that we have the table even if this command buffer doesn't
1776
* initialize any images.
1777
*/
1778
if (cmd_buffer->device->info.has_aux_map) {
1779
anv_add_pending_pipe_bits(cmd_buffer,
1780
ANV_PIPE_AUX_TABLE_INVALIDATE_BIT,
1781
"new cmd buffer with aux-tt");
1782
}
1783
1784
/* We send an "Indirect State Pointers Disable" packet at
1785
* EndCommandBuffer, so all push constant packets are ignored during a
1786
* context restore. Documentation says after that command, we need to
1787
* emit push constants again before any rendering operation. So we
1788
* flag them dirty here to make sure they get emitted.
1789
*/
1790
cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
1791
1792
VkResult result = VK_SUCCESS;
1793
if (cmd_buffer->usage_flags &
1794
VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
1795
assert(pBeginInfo->pInheritanceInfo);
1796
ANV_FROM_HANDLE(anv_render_pass, pass,
1797
pBeginInfo->pInheritanceInfo->renderPass);
1798
struct anv_subpass *subpass =
1799
&pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
1800
ANV_FROM_HANDLE(anv_framebuffer, framebuffer,
1801
pBeginInfo->pInheritanceInfo->framebuffer);
1802
1803
cmd_buffer->state.pass = pass;
1804
cmd_buffer->state.subpass = subpass;
1805
1806
/* This is optional in the inheritance info. */
1807
cmd_buffer->state.framebuffer = framebuffer;
1808
1809
result = genX(cmd_buffer_setup_attachments)(cmd_buffer, pass,
1810
framebuffer, NULL);
1811
if (result != VK_SUCCESS)
1812
return result;
1813
1814
result = genX(cmd_buffer_alloc_att_surf_states)(cmd_buffer, pass,
1815
subpass);
1816
if (result != VK_SUCCESS)
1817
return result;
1818
1819
/* Record that HiZ is enabled if we can. */
1820
if (cmd_buffer->state.framebuffer) {
1821
const struct anv_image_view * const iview =
1822
anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
1823
1824
if (iview) {
1825
VkImageLayout layout =
1826
cmd_buffer->state.subpass->depth_stencil_attachment->layout;
1827
1828
enum isl_aux_usage aux_usage =
1829
anv_layout_to_aux_usage(&cmd_buffer->device->info, iview->image,
1830
VK_IMAGE_ASPECT_DEPTH_BIT,
1831
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
1832
layout);
1833
1834
cmd_buffer->state.hiz_enabled = isl_aux_usage_has_hiz(aux_usage);
1835
}
1836
}
1837
1838
cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
1839
}
1840
1841
#if GFX_VERx10 >= 75
1842
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
1843
const VkCommandBufferInheritanceConditionalRenderingInfoEXT *conditional_rendering_info =
1844
vk_find_struct_const(pBeginInfo->pInheritanceInfo->pNext, COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT);
1845
1846
/* If the secondary buffer supports conditional rendering,
1847
* we should emit commands as if conditional rendering is enabled.
1848
*/
1849
cmd_buffer->state.conditional_render_enabled =
1850
conditional_rendering_info && conditional_rendering_info->conditionalRenderingEnable;
1851
}
1852
#endif
1853
1854
return result;
1855
}
1856
1857
/* From the PRM, Volume 2a:
1858
*
1859
* "Indirect State Pointers Disable
1860
*
1861
* At the completion of the post-sync operation associated with this pipe
1862
* control packet, the indirect state pointers in the hardware are
1863
* considered invalid; the indirect pointers are not saved in the context.
1864
* If any new indirect state commands are executed in the command stream
1865
* while the pipe control is pending, the new indirect state commands are
1866
* preserved.
1867
*
1868
* [DevIVB+]: Using Invalidate State Pointer (ISP) only inhibits context
1869
* restoring of Push Constant (3DSTATE_CONSTANT_*) commands. Push Constant
1870
* commands are only considered as Indirect State Pointers. Once ISP is
1871
* issued in a context, SW must initialize by programming push constant
1872
* commands for all the shaders (at least to zero length) before attempting
1873
* any rendering operation for the same context."
1874
*
1875
* 3DSTATE_CONSTANT_* packets are restored during a context restore,
1876
* even though they point to a BO that has been already unreferenced at
1877
* the end of the previous batch buffer. This has been fine so far since
1878
* we are protected by the scratch page (every address not covered by
1879
* a BO should be pointing to the scratch page). But on CNL, it is
1880
* causing a GPU hang during context restore at the 3DSTATE_CONSTANT_*
1881
* instruction.
1882
*
1883
* The flag "Indirect State Pointers Disable" in PIPE_CONTROL tells the
1884
* hardware to ignore previous 3DSTATE_CONSTANT_* packets during a
1885
* context restore, so the mentioned hang doesn't happen. However,
1886
* software must program push constant commands for all stages prior to
1887
* rendering anything. So we flag them dirty in BeginCommandBuffer.
1888
*
1889
* Finally, we also make sure to stall at pixel scoreboard to make sure the
1890
* constants have been loaded into the EUs prior to disabling the push constants
1891
* so that it doesn't hang a previous 3DPRIMITIVE.
1892
*/
1893
static void
1894
emit_isp_disable(struct anv_cmd_buffer *cmd_buffer)
1895
{
1896
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1897
pc.StallAtPixelScoreboard = true;
1898
pc.CommandStreamerStallEnable = true;
1899
anv_debug_dump_pc(pc);
1900
}
1901
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1902
pc.IndirectStatePointersDisable = true;
1903
pc.CommandStreamerStallEnable = true;
1904
anv_debug_dump_pc(pc);
1905
}
1906
}
1907
1908
VkResult
1909
genX(EndCommandBuffer)(
1910
VkCommandBuffer commandBuffer)
1911
{
1912
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1913
1914
if (anv_batch_has_error(&cmd_buffer->batch))
1915
return cmd_buffer->batch.status;
1916
1917
anv_measure_endcommandbuffer(cmd_buffer);
1918
1919
/* We want every command buffer to start with the PMA fix in a known state,
1920
* so we disable it at the end of the command buffer.
1921
*/
1922
genX(cmd_buffer_enable_pma_fix)(cmd_buffer, false);
1923
1924
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1925
1926
emit_isp_disable(cmd_buffer);
1927
1928
anv_cmd_buffer_end_batch_buffer(cmd_buffer);
1929
1930
return VK_SUCCESS;
1931
}
1932
1933
void
1934
genX(CmdExecuteCommands)(
1935
VkCommandBuffer commandBuffer,
1936
uint32_t commandBufferCount,
1937
const VkCommandBuffer* pCmdBuffers)
1938
{
1939
ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);
1940
1941
assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1942
1943
if (anv_batch_has_error(&primary->batch))
1944
return;
1945
1946
/* The secondary command buffers will assume that the PMA fix is disabled
1947
* when they begin executing. Make sure this is true.
1948
*/
1949
genX(cmd_buffer_enable_pma_fix)(primary, false);
1950
1951
/* The secondary command buffer doesn't know which textures etc. have been
1952
* flushed prior to its execution. Apply those flushes now.
1953
*/
1954
genX(cmd_buffer_apply_pipe_flushes)(primary);
1955
1956
for (uint32_t i = 0; i < commandBufferCount; i++) {
1957
ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
1958
1959
assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
1960
assert(!anv_batch_has_error(&secondary->batch));
1961
1962
#if GFX_VERx10 >= 75
1963
if (secondary->state.conditional_render_enabled) {
1964
if (!primary->state.conditional_render_enabled) {
1965
/* Secondary buffer is constructed as if it will be executed
1966
* with conditional rendering, so we should satisfy this dependency
1967
* regardless of conditional rendering being enabled in primary.
1968
*/
1969
struct mi_builder b;
1970
mi_builder_init(&b, &primary->device->info, &primary->batch);
1971
mi_store(&b, mi_reg64(ANV_PREDICATE_RESULT_REG),
1972
mi_imm(UINT64_MAX));
1973
}
1974
}
1975
#endif
1976
1977
if (secondary->usage_flags &
1978
VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
1979
/* If we're continuing a render pass from the primary, we need to
1980
* copy the surface states for the current subpass into the storage
1981
* we allocated for them in BeginCommandBuffer.
1982
*/
1983
struct anv_bo *ss_bo =
1984
primary->device->surface_state_pool.block_pool.bo;
1985
struct anv_state src_state = primary->state.attachment_states;
1986
struct anv_state dst_state = secondary->state.attachment_states;
1987
assert(src_state.alloc_size == dst_state.alloc_size);
1988
1989
genX(cmd_buffer_so_memcpy)(primary,
1990
(struct anv_address) {
1991
.bo = ss_bo,
1992
.offset = dst_state.offset,
1993
},
1994
(struct anv_address) {
1995
.bo = ss_bo,
1996
.offset = src_state.offset,
1997
},
1998
src_state.alloc_size);
1999
}
2000
2001
anv_cmd_buffer_add_secondary(primary, secondary);
2002
2003
assert(secondary->perf_query_pool == NULL || primary->perf_query_pool == NULL ||
2004
secondary->perf_query_pool == primary->perf_query_pool);
2005
if (secondary->perf_query_pool)
2006
primary->perf_query_pool = secondary->perf_query_pool;
2007
}
2008
2009
/* The secondary isn't counted in our VF cache tracking so we need to
2010
* invalidate the whole thing.
2011
*/
2012
if (GFX_VER >= 8 && GFX_VER <= 9) {
2013
anv_add_pending_pipe_bits(primary,
2014
ANV_PIPE_CS_STALL_BIT | ANV_PIPE_VF_CACHE_INVALIDATE_BIT,
2015
"Secondary cmd buffer not tracked in VF cache");
2016
}
2017
2018
/* The secondary may have selected a different pipeline (3D or compute) and
2019
* may have changed the current L3$ configuration. Reset our tracking
2020
* variables to invalid values to ensure that we re-emit these in the case
2021
* where we do any draws or compute dispatches from the primary after the
2022
* secondary has returned.
2023
*/
2024
primary->state.current_pipeline = UINT32_MAX;
2025
primary->state.current_l3_config = NULL;
2026
primary->state.current_hash_scale = 0;
2027
2028
/* Each of the secondary command buffers will use its own state base
2029
* address. We need to re-emit state base address for the primary after
2030
* all of the secondaries are done.
2031
*
2032
* TODO: Maybe we want to make this a dirty bit to avoid extra state base
2033
* address calls?
2034
*/
2035
genX(cmd_buffer_emit_state_base_address)(primary);
2036
}
2037
2038
/**
2039
* Program the hardware to use the specified L3 configuration.
2040
*/
2041
void
2042
genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
2043
const struct intel_l3_config *cfg)
2044
{
2045
assert(cfg || GFX_VER >= 12);
2046
if (cfg == cmd_buffer->state.current_l3_config)
2047
return;
2048
2049
#if GFX_VER >= 11
2050
/* On Gfx11+ we use only one config, so verify it remains the same and skip
2051
* the stalling programming entirely.
2052
*/
2053
assert(cfg == cmd_buffer->device->l3_config);
2054
#else
2055
if (INTEL_DEBUG & DEBUG_L3) {
2056
mesa_logd("L3 config transition: ");
2057
intel_dump_l3_config(cfg, stderr);
2058
}
2059
2060
/* According to the hardware docs, the L3 partitioning can only be changed
2061
* while the pipeline is completely drained and the caches are flushed,
2062
* which involves a first PIPE_CONTROL flush which stalls the pipeline...
2063
*/
2064
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
2065
pc.DCFlushEnable = true;
2066
pc.PostSyncOperation = NoWrite;
2067
pc.CommandStreamerStallEnable = true;
2068
anv_debug_dump_pc(pc);
2069
}
2070
2071
/* ...followed by a second pipelined PIPE_CONTROL that initiates
2072
* invalidation of the relevant caches. Note that because RO invalidation
2073
* happens at the top of the pipeline (i.e. right away as the PIPE_CONTROL
2074
* command is processed by the CS) we cannot combine it with the previous
2075
* stalling flush as the hardware documentation suggests, because that
2076
* would cause the CS to stall on previous rendering *after* RO
2077
* invalidation and wouldn't prevent the RO caches from being polluted by
2078
* concurrent rendering before the stall completes. This intentionally
2079
* doesn't implement the SKL+ hardware workaround suggesting to enable CS
2080
* stall on PIPE_CONTROLs with the texture cache invalidation bit set for
2081
* GPGPU workloads because the previous and subsequent PIPE_CONTROLs
2082
* already guarantee that there is no concurrent GPGPU kernel execution
2083
* (see SKL HSD 2132585).
2084
*/
2085
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
2086
pc.TextureCacheInvalidationEnable = true;
2087
pc.ConstantCacheInvalidationEnable = true;
2088
pc.InstructionCacheInvalidateEnable = true;
2089
pc.StateCacheInvalidationEnable = true;
2090
pc.PostSyncOperation = NoWrite;
2091
anv_debug_dump_pc(pc);
2092
}
2093
2094
/* Now send a third stalling flush to make sure that invalidation is
2095
* complete when the L3 configuration registers are modified.
2096
*/
2097
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
2098
pc.DCFlushEnable = true;
2099
pc.PostSyncOperation = NoWrite;
2100
pc.CommandStreamerStallEnable = true;
2101
anv_debug_dump_pc(pc);
2102
}
2103
2104
genX(emit_l3_config)(&cmd_buffer->batch, cmd_buffer->device, cfg);
2105
#endif /* GFX_VER >= 11 */
2106
cmd_buffer->state.current_l3_config = cfg;
2107
}
2108
2109
void
2110
genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer)
2111
{
2112
UNUSED const struct intel_device_info *devinfo = &cmd_buffer->device->info;
2113
enum anv_pipe_bits bits = cmd_buffer->state.pending_pipe_bits;
2114
2115
if (unlikely(cmd_buffer->device->physical->always_flush_cache))
2116
bits |= ANV_PIPE_FLUSH_BITS | ANV_PIPE_INVALIDATE_BITS;
2117
else if (bits == 0)
2118
return;
2119
2120
/*
2121
* From Sandybridge PRM, volume 2, "1.7.2 End-of-Pipe Synchronization":
2122
*
2123
* Write synchronization is a special case of end-of-pipe
2124
* synchronization that requires that the render cache and/or depth
2125
* related caches are flushed to memory, where the data will become
2126
* globally visible. This type of synchronization is required prior to
2127
* SW (CPU) actually reading the result data from memory, or initiating
2128
* an operation that will use as a read surface (such as a texture
2129
* surface) a previous render target and/or depth/stencil buffer
2130
*
2131
*
2132
* From Haswell PRM, volume 2, part 1, "End-of-Pipe Synchronization":
2133
*
2134
* Exercising the write cache flush bits (Render Target Cache Flush
2135
* Enable, Depth Cache Flush Enable, DC Flush) in PIPE_CONTROL only
2136
* ensures the write caches are flushed and doesn't guarantee the data
2137
* is globally visible.
2138
*
2139
* SW can track the completion of the end-of-pipe-synchronization by
2140
* using "Notify Enable" and "PostSync Operation - Write Immediate
2141
* Data" in the PIPE_CONTROL command.
2142
*
2143
* In other words, flushes are pipelined while invalidations are handled
2144
* immediately. Therefore, if we're flushing anything then we need to
2145
* schedule an end-of-pipe sync before any invalidations can happen.
2146
*/
2147
if (bits & ANV_PIPE_FLUSH_BITS)
2148
bits |= ANV_PIPE_NEEDS_END_OF_PIPE_SYNC_BIT;
2149
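/* In practice this means that a request combining write-cache flushes with
 * cache invalidations gets emitted as two PIPE_CONTROLs below: first the
 * flush half with an end-of-pipe sync (CS stall + Write Immediate Data),
 * then a separate invalidate-only PIPE_CONTROL.
 */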
2150
2151
/* HSD 1209978178: docs say that before programming the aux table:
2152
*
2153
* "Driver must ensure that the engine is IDLE but ensure it doesn't
2154
* add extra flushes in the case it knows that the engine is already
2155
* IDLE."
2156
*/
2157
if (GFX_VER == 12 && (bits & ANV_PIPE_AUX_TABLE_INVALIDATE_BIT))
2158
bits |= ANV_PIPE_NEEDS_END_OF_PIPE_SYNC_BIT;
2159
2160
/* If we're going to do an invalidate and we have a pending end-of-pipe
2161
* sync that has yet to be resolved, we do the end-of-pipe sync now.
2162
*/
2163
if ((bits & ANV_PIPE_INVALIDATE_BITS) &&
2164
(bits & ANV_PIPE_NEEDS_END_OF_PIPE_SYNC_BIT)) {
2165
bits |= ANV_PIPE_END_OF_PIPE_SYNC_BIT;
2166
bits &= ~ANV_PIPE_NEEDS_END_OF_PIPE_SYNC_BIT;
2167
}
2168
2169
/* Wa_1409226450, Wait for EU to be idle before pipe control which
2170
* invalidates the instruction cache
2171
*/
2172
if (GFX_VER == 12 && (bits & ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT))
2173
bits |= ANV_PIPE_CS_STALL_BIT | ANV_PIPE_STALL_AT_SCOREBOARD_BIT;
2174
2175
if ((GFX_VER >= 8 && GFX_VER <= 9) &&
2176
(bits & ANV_PIPE_CS_STALL_BIT) &&
2177
(bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT)) {
2178
/* If we are doing a VF cache invalidate AND a CS stall (it must be
2179
* both) then we can reset our vertex cache tracking.
2180
*/
2181
memset(cmd_buffer->state.gfx.vb_dirty_ranges, 0,
2182
sizeof(cmd_buffer->state.gfx.vb_dirty_ranges));
2183
memset(&cmd_buffer->state.gfx.ib_dirty_range, 0,
2184
sizeof(cmd_buffer->state.gfx.ib_dirty_range));
2185
}
2186
2187
/* Project: SKL / Argument: LRI Post Sync Operation [23]
2188
*
2189
* "PIPECONTROL command with “Command Streamer Stall Enable” must be
2190
* programmed prior to programming a PIPECONTROL command with "LRI
2191
* Post Sync Operation" in GPGPU mode of operation (i.e when
2192
* PIPELINE_SELECT command is set to GPGPU mode of operation)."
2193
*
2194
* The same text exists a few rows below for Post Sync Op.
2195
*
2196
* On Gfx12 this is Wa_1607156449.
2197
*/
2198
if (bits & ANV_PIPE_POST_SYNC_BIT) {
2199
if ((GFX_VER == 9 || (GFX_VER == 12 && devinfo->revision == 0 /* A0 */)) &&
2200
cmd_buffer->state.current_pipeline == GPGPU)
2201
bits |= ANV_PIPE_CS_STALL_BIT;
2202
bits &= ~ANV_PIPE_POST_SYNC_BIT;
2203
}
2204
2205
if (bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_STALL_BITS |
2206
ANV_PIPE_END_OF_PIPE_SYNC_BIT)) {
2207
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
2208
#if GFX_VER >= 12
2209
pipe.TileCacheFlushEnable = bits & ANV_PIPE_TILE_CACHE_FLUSH_BIT;
2210
pipe.HDCPipelineFlushEnable |= bits & ANV_PIPE_HDC_PIPELINE_FLUSH_BIT;
2211
#else
2212
/* Flushing HDC pipeline requires DC Flush on earlier HW. */
2213
pipe.DCFlushEnable |= bits & ANV_PIPE_HDC_PIPELINE_FLUSH_BIT;
2214
#endif
2215
pipe.DepthCacheFlushEnable = bits & ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
2216
pipe.DCFlushEnable |= bits & ANV_PIPE_DATA_CACHE_FLUSH_BIT;
2217
pipe.RenderTargetCacheFlushEnable =
2218
bits & ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
2219
2220
/* Wa_1409600907: "PIPE_CONTROL with Depth Stall Enable bit must
2221
* be set with any PIPE_CONTROL with Depth Flush Enable bit set.
2222
*/
2223
#if GFX_VER >= 12
2224
pipe.DepthStallEnable =
2225
pipe.DepthCacheFlushEnable || (bits & ANV_PIPE_DEPTH_STALL_BIT);
2226
#else
2227
pipe.DepthStallEnable = bits & ANV_PIPE_DEPTH_STALL_BIT;
2228
#endif
2229
2230
pipe.CommandStreamerStallEnable = bits & ANV_PIPE_CS_STALL_BIT;
2231
pipe.StallAtPixelScoreboard = bits & ANV_PIPE_STALL_AT_SCOREBOARD_BIT;
2232
2233
/* From Sandybridge PRM, volume 2, "1.7.3.1 Writing a Value to Memory":
2234
*
2235
* "The most common action to perform upon reaching a
2236
* synchronization point is to write a value out to memory. An
2237
* immediate value (included with the synchronization command) may
2238
* be written."
2239
*
2240
*
2241
* From Broadwell PRM, volume 7, "End-of-Pipe Synchronization":
2242
*
2243
* "In case the data flushed out by the render engine is to be
2244
* read back in to the render engine in coherent manner, then the
2245
* render engine has to wait for the fence completion before
2246
* accessing the flushed data. This can be achieved by following
2247
* means on various products: PIPE_CONTROL command with CS Stall
2248
* and the required write caches flushed with Post-Sync-Operation
2249
* as Write Immediate Data.
2250
*
2251
* Example:
2252
* - Workload-1 (3D/GPGPU/MEDIA)
2253
* - PIPE_CONTROL (CS Stall, Post-Sync-Operation Write
2254
* Immediate Data, Required Write Cache Flush bits set)
2255
* - Workload-2 (Can use the data produce or output by
2256
* Workload-1)
2257
*/
2258
if (bits & ANV_PIPE_END_OF_PIPE_SYNC_BIT) {
2259
pipe.CommandStreamerStallEnable = true;
2260
pipe.PostSyncOperation = WriteImmediateData;
2261
pipe.Address = cmd_buffer->device->workaround_address;
2262
}
2263
2264
/*
2265
* According to the Broadwell documentation, any PIPE_CONTROL with the
2266
* "Command Streamer Stall" bit set must also have another bit set,
2267
* with six different options:
2268
*
2269
* - Render Target Cache Flush
2270
* - Depth Cache Flush
2271
* - Stall at Pixel Scoreboard
2272
* - Post-Sync Operation
2273
* - Depth Stall
2274
* - DC Flush Enable
2275
*
2276
* I chose "Stall at Pixel Scoreboard" since that's what we use in
2277
* mesa and it seems to work fine. The choice is fairly arbitrary.
2278
*/
2279
if (pipe.CommandStreamerStallEnable &&
2280
!pipe.RenderTargetCacheFlushEnable &&
2281
!pipe.DepthCacheFlushEnable &&
2282
!pipe.StallAtPixelScoreboard &&
2283
!pipe.PostSyncOperation &&
2284
!pipe.DepthStallEnable &&
2285
!pipe.DCFlushEnable)
2286
pipe.StallAtPixelScoreboard = true;
2287
anv_debug_dump_pc(pipe);
2288
}
2289
2290
/* If a render target flush was emitted, then we can toggle off the bit
2291
* saying that render target writes are ongoing.
2292
*/
2293
if (bits & ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)
2294
bits &= ~(ANV_PIPE_RENDER_TARGET_BUFFER_WRITES);
2295
2296
if (GFX_VERx10 == 75) {
2297
/* Haswell needs additional work-arounds:
2298
*
2299
* From Haswell PRM, volume 2, part 1, "End-of-Pipe Synchronization":
2300
*
2301
* Option 1:
2302
* PIPE_CONTROL command with the CS Stall and the required write
2303
* caches flushed with Post-SyncOperation as Write Immediate Data
2304
* followed by eight dummy MI_STORE_DATA_IMM (write to scratch
2305
* space) commands.
2306
*
2307
* Example:
2308
* - Workload-1
2309
* - PIPE_CONTROL (CS Stall, Post-Sync-Operation Write
2310
* Immediate Data, Required Write Cache Flush bits set)
2311
* - MI_STORE_DATA_IMM (8 times) (Dummy data, Scratch Address)
2312
* - Workload-2 (Can use the data produce or output by
2313
* Workload-1)
2314
*
2315
* Unfortunately, both the PRMs and the internal docs are a bit
2316
* out-of-date in this regard. What the windows driver does (and
2317
* this appears to actually work) is to emit a register read from the
2318
* memory address written by the pipe control above.
2319
*
2320
* What register we load into doesn't matter. We choose an indirect
2321
* rendering register because we know it always exists and it's one
2322
* of the first registers the command parser allows us to write. If
2323
* you don't have command parser support in your kernel (pre-4.2),
2324
* this will get turned into MI_NOOP and you won't get the
2325
* workaround. Unfortunately, there's just not much we can do in
2326
* that case. This register is perfectly safe to write since we
2327
* always re-load all of the indirect draw registers right before
2328
* 3DPRIMITIVE when needed anyway.
2329
*/
2330
anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
2331
lrm.RegisterAddress = 0x243C; /* GFX7_3DPRIM_START_INSTANCE */
2332
lrm.MemoryAddress = cmd_buffer->device->workaround_address;
2333
}
2334
}
2335
2336
bits &= ~(ANV_PIPE_FLUSH_BITS | ANV_PIPE_STALL_BITS |
2337
ANV_PIPE_END_OF_PIPE_SYNC_BIT);
2338
}
2339
2340
if (bits & ANV_PIPE_INVALIDATE_BITS) {
2341
/* From the SKL PRM, Vol. 2a, "PIPE_CONTROL",
2342
*
2343
* "If the VF Cache Invalidation Enable is set to a 1 in a
2344
* PIPE_CONTROL, a separate Null PIPE_CONTROL, all bitfields sets to
2345
* 0, with the VF Cache Invalidation Enable set to 0 needs to be sent
2346
* prior to the PIPE_CONTROL with VF Cache Invalidation Enable set to
2347
* a 1."
2348
*
2349
* This appears to hang Broadwell, so we restrict it to just gfx9.
2350
*/
2351
if (GFX_VER == 9 && (bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT))
2352
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe);
2353
2354
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
2355
pipe.StateCacheInvalidationEnable =
2356
bits & ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
2357
pipe.ConstantCacheInvalidationEnable =
2358
bits & ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
2359
pipe.VFCacheInvalidationEnable =
2360
bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
2361
pipe.TextureCacheInvalidationEnable =
2362
bits & ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
2363
pipe.InstructionCacheInvalidateEnable =
2364
bits & ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT;
2365
2366
/* From the SKL PRM, Vol. 2a, "PIPE_CONTROL",
2367
*
2368
* "When VF Cache Invalidate is set “Post Sync Operation” must be
2369
* enabled to “Write Immediate Data” or “Write PS Depth Count” or
2370
* “Write Timestamp”.
2371
*/
2372
if (GFX_VER == 9 && pipe.VFCacheInvalidationEnable) {
2373
pipe.PostSyncOperation = WriteImmediateData;
2374
pipe.Address = cmd_buffer->device->workaround_address;
2375
}
2376
anv_debug_dump_pc(pipe);
2377
}
2378
2379
#if GFX_VER == 12
2380
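/* Writing 1 to GFX_CCS_AUX_INV asks the hardware to invalidate its cached
 * aux-map (AUX-TT) entries. The end-of-pipe sync scheduled above for
 * ANV_PIPE_AUX_TABLE_INVALIDATE_BIT should have made the engine idle by
 * the time this LRI executes, per the HSD note earlier in this function.
 */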
if ((bits & ANV_PIPE_AUX_TABLE_INVALIDATE_BIT) &&
2381
cmd_buffer->device->info.has_aux_map) {
2382
anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
2383
lri.RegisterOffset = GENX(GFX_CCS_AUX_INV_num);
2384
lri.DataDWord = 1;
2385
}
2386
}
2387
#endif
2388
2389
bits &= ~ANV_PIPE_INVALIDATE_BITS;
2390
}
2391
2392
cmd_buffer->state.pending_pipe_bits = bits;
2393
}
2394
2395
void genX(CmdPipelineBarrier)(
2396
VkCommandBuffer commandBuffer,
2397
VkPipelineStageFlags srcStageMask,
2398
VkPipelineStageFlags destStageMask,
2399
VkBool32 byRegion,
2400
uint32_t memoryBarrierCount,
2401
const VkMemoryBarrier* pMemoryBarriers,
2402
uint32_t bufferMemoryBarrierCount,
2403
const VkBufferMemoryBarrier* pBufferMemoryBarriers,
2404
uint32_t imageMemoryBarrierCount,
2405
const VkImageMemoryBarrier* pImageMemoryBarriers)
2406
{
2407
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2408
2409
/* XXX: Right now, we're really dumb and just flush whatever categories
2410
* the app asks for. One of these days we may make this a bit better
2411
* but right now that's all the hardware allows for in most areas.
2412
*/
2413
VkAccessFlags src_flags = 0;
2414
VkAccessFlags dst_flags = 0;
2415
2416
for (uint32_t i = 0; i < memoryBarrierCount; i++) {
2417
src_flags |= pMemoryBarriers[i].srcAccessMask;
2418
dst_flags |= pMemoryBarriers[i].dstAccessMask;
2419
}
2420
2421
for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
2422
src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
2423
dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
2424
}
2425
2426
for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
2427
src_flags |= pImageMemoryBarriers[i].srcAccessMask;
2428
dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
2429
ANV_FROM_HANDLE(anv_image, image, pImageMemoryBarriers[i].image);
2430
const VkImageSubresourceRange *range =
2431
&pImageMemoryBarriers[i].subresourceRange;
2432
2433
uint32_t base_layer, layer_count;
2434
if (image->type == VK_IMAGE_TYPE_3D) {
2435
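/* For 3D images the barrier covers the depth slices of the selected
 * miplevel, so treat each (minified) slice of baseMipLevel as a layer.
 */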
base_layer = 0;
2436
layer_count = anv_minify(image->extent.depth, range->baseMipLevel);
2437
} else {
2438
base_layer = range->baseArrayLayer;
2439
layer_count = anv_get_layerCount(image, range);
2440
}
2441
2442
if (range->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
2443
transition_depth_buffer(cmd_buffer, image,
2444
base_layer, layer_count,
2445
pImageMemoryBarriers[i].oldLayout,
2446
pImageMemoryBarriers[i].newLayout,
2447
false /* will_full_fast_clear */);
2448
}
2449
2450
if (range->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
2451
transition_stencil_buffer(cmd_buffer, image,
2452
range->baseMipLevel,
2453
anv_get_levelCount(image, range),
2454
base_layer, layer_count,
2455
pImageMemoryBarriers[i].oldLayout,
2456
pImageMemoryBarriers[i].newLayout,
2457
false /* will_full_fast_clear */);
2458
}
2459
2460
if (range->aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
2461
VkImageAspectFlags color_aspects =
2462
anv_image_expand_aspects(image, range->aspectMask);
2463
anv_foreach_image_aspect_bit(aspect_bit, image, color_aspects) {
2464
transition_color_buffer(cmd_buffer, image, 1UL << aspect_bit,
2465
range->baseMipLevel,
2466
anv_get_levelCount(image, range),
2467
base_layer, layer_count,
2468
pImageMemoryBarriers[i].oldLayout,
2469
pImageMemoryBarriers[i].newLayout,
2470
pImageMemoryBarriers[i].srcQueueFamilyIndex,
2471
pImageMemoryBarriers[i].dstQueueFamilyIndex,
2472
false /* will_full_fast_clear */);
2473
}
2474
}
2475
}
2476
2477
anv_add_pending_pipe_bits(cmd_buffer,
2478
anv_pipe_flush_bits_for_access_flags(cmd_buffer->device, src_flags) |
2479
anv_pipe_invalidate_bits_for_access_flags(cmd_buffer->device, dst_flags),
2480
"pipe barrier");
2481
}
2482
2483
static void
2484
cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
2485
{
2486
VkShaderStageFlags stages =
2487
cmd_buffer->state.gfx.pipeline->active_stages;
2488
2489
/* In order to avoid thrash, we assume that vertex and fragment stages
2490
* always exist. In the rare case where one is missing *and* the other
2491
* uses push constants, this may be suboptimal. However, avoiding stalls
2492
* seems more important.
2493
*/
2494
stages |= VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_VERTEX_BIT;
2495
2496
if (stages == cmd_buffer->state.gfx.push_constant_stages)
2497
return;
2498
2499
#if GFX_VER >= 8
2500
const unsigned push_constant_kb = 32;
2501
#elif GFX_VERx10 == 75
2502
const unsigned push_constant_kb = cmd_buffer->device->info.gt == 3 ? 32 : 16;
2503
#else
2504
const unsigned push_constant_kb = 16;
2505
#endif
2506
2507
const unsigned num_stages =
2508
util_bitcount(stages & VK_SHADER_STAGE_ALL_GRAPHICS);
2509
unsigned size_per_stage = push_constant_kb / num_stages;
2510
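/* For example (illustrative only): with just VS+FS active on a 32KB part,
 * num_stages == 2 so each stage gets 16KB; with all five graphics stages,
 * 32/5 rounds down to 6KB per stage in the loop below and the remaining
 * 8KB goes to the fragment shader via 3DSTATE_PUSH_CONSTANT_ALLOC_PS.
 */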
2511
/* Broadwell+ and Haswell gt3 require that the push constant sizes be in
2512
* units of 2KB. Incidentally, these are the same platforms that have
2513
* 32KB worth of push constant space.
2514
*/
2515
if (push_constant_kb == 32)
2516
size_per_stage &= ~1u;
2517
2518
uint32_t kb_used = 0;
2519
for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_FRAGMENT; i++) {
2520
unsigned push_size = (stages & (1 << i)) ? size_per_stage : 0;
2521
anv_batch_emit(&cmd_buffer->batch,
2522
GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
2523
alloc._3DCommandSubOpcode = 18 + i;
2524
alloc.ConstantBufferOffset = (push_size > 0) ? kb_used : 0;
2525
alloc.ConstantBufferSize = push_size;
2526
}
2527
kb_used += push_size;
2528
}
2529
2530
anv_batch_emit(&cmd_buffer->batch,
2531
GENX(3DSTATE_PUSH_CONSTANT_ALLOC_PS), alloc) {
2532
alloc.ConstantBufferOffset = kb_used;
2533
alloc.ConstantBufferSize = push_constant_kb - kb_used;
2534
}
2535
2536
cmd_buffer->state.gfx.push_constant_stages = stages;
2537
2538
/* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
2539
*
2540
* "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
2541
* the next 3DPRIMITIVE command after programming the
2542
* 3DSTATE_PUSH_CONSTANT_ALLOC_VS"
2543
*
2544
* Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
2545
* pipeline setup, we need to dirty push constants.
2546
*/
2547
cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
2548
}
2549
2550
static VkResult
2551
emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
2552
struct anv_cmd_pipeline_state *pipe_state,
2553
struct anv_shader_bin *shader,
2554
struct anv_state *bt_state)
2555
{
2556
struct anv_subpass *subpass = cmd_buffer->state.subpass;
2557
uint32_t state_offset;
2558
2559
struct anv_pipeline_bind_map *map = &shader->bind_map;
2560
if (map->surface_count == 0) {
2561
*bt_state = (struct anv_state) { 0, };
2562
return VK_SUCCESS;
2563
}
2564
2565
*bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
2566
map->surface_count,
2567
&state_offset);
2568
uint32_t *bt_map = bt_state->map;
2569
2570
if (bt_state->map == NULL)
2571
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2572
2573
/* We only need to emit relocs if we're not using softpin. If we are using
2574
* softpin then we always keep all user-allocated memory objects resident.
2575
*/
2576
const bool need_client_mem_relocs =
2577
!anv_use_softpin(cmd_buffer->device->physical);
2578
struct anv_push_constants *push = &pipe_state->push_constants;
2579
2580
for (uint32_t s = 0; s < map->surface_count; s++) {
2581
struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];
2582
2583
struct anv_state surface_state;
2584
2585
switch (binding->set) {
2586
case ANV_DESCRIPTOR_SET_NULL:
2587
bt_map[s] = 0;
2588
break;
2589
2590
case ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS:
2591
/* Color attachment binding */
2592
assert(shader->stage == MESA_SHADER_FRAGMENT);
2593
if (binding->index < subpass->color_count) {
2594
const unsigned att =
2595
subpass->color_attachments[binding->index].attachment;
2596
2597
/* From the Vulkan 1.0.46 spec:
2598
*
2599
* "If any color or depth/stencil attachments are
2600
* VK_ATTACHMENT_UNUSED, then no writes occur for those
2601
* attachments."
2602
*/
2603
if (att == VK_ATTACHMENT_UNUSED) {
2604
surface_state = cmd_buffer->state.null_surface_state;
2605
} else {
2606
surface_state = cmd_buffer->state.attachments[att].color.state;
2607
}
2608
} else {
2609
surface_state = cmd_buffer->state.null_surface_state;
2610
}
2611
2612
assert(surface_state.map);
2613
bt_map[s] = surface_state.offset + state_offset;
2614
break;
2615
2616
case ANV_DESCRIPTOR_SET_SHADER_CONSTANTS: {
2617
struct anv_state surface_state =
2618
anv_cmd_buffer_alloc_surface_state(cmd_buffer);
2619
2620
struct anv_address constant_data = {
2621
.bo = cmd_buffer->device->instruction_state_pool.block_pool.bo,
2622
.offset = shader->kernel.offset +
2623
shader->prog_data->const_data_offset,
2624
};
2625
unsigned constant_data_size = shader->prog_data->const_data_size;
2626
2627
const enum isl_format format =
2628
anv_isl_format_for_descriptor_type(cmd_buffer->device,
2629
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
2630
anv_fill_buffer_surface_state(cmd_buffer->device,
2631
surface_state, format,
2632
ISL_SURF_USAGE_CONSTANT_BUFFER_BIT,
2633
constant_data, constant_data_size, 1);
2634
2635
assert(surface_state.map);
2636
bt_map[s] = surface_state.offset + state_offset;
2637
add_surface_reloc(cmd_buffer, surface_state, constant_data);
2638
break;
2639
}
2640
2641
case ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS: {
2642
/* This is always the first binding for compute shaders */
2643
assert(shader->stage == MESA_SHADER_COMPUTE && s == 0);
2644
2645
struct anv_state surface_state =
2646
anv_cmd_buffer_alloc_surface_state(cmd_buffer);
2647
2648
const enum isl_format format =
2649
anv_isl_format_for_descriptor_type(cmd_buffer->device,
2650
VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
2651
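/* The num_workgroups buffer only holds the three GPGPU dispatch
 * dimensions, hence the 12-byte (3 x uint32_t) range below.
 */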
anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
2652
format,
2653
ISL_SURF_USAGE_CONSTANT_BUFFER_BIT,
2654
cmd_buffer->state.compute.num_workgroups,
2655
12, 1);
2656
2657
assert(surface_state.map);
2658
bt_map[s] = surface_state.offset + state_offset;
2659
if (need_client_mem_relocs) {
2660
add_surface_reloc(cmd_buffer, surface_state,
2661
cmd_buffer->state.compute.num_workgroups);
2662
}
2663
break;
2664
}
2665
2666
case ANV_DESCRIPTOR_SET_DESCRIPTORS: {
2667
/* This is a descriptor set buffer so the set index is actually
2668
* given by binding->binding. (Yes, that's confusing.)
2669
*/
2670
struct anv_descriptor_set *set =
2671
pipe_state->descriptors[binding->index];
2672
assert(set->desc_mem.alloc_size);
2673
assert(set->desc_surface_state.alloc_size);
2674
bt_map[s] = set->desc_surface_state.offset + state_offset;
2675
add_surface_reloc(cmd_buffer, set->desc_surface_state,
2676
anv_descriptor_set_address(set));
2677
break;
2678
}
2679
2680
default: {
2681
assert(binding->set < MAX_SETS);
2682
const struct anv_descriptor_set *set =
2683
pipe_state->descriptors[binding->set];
2684
if (binding->index >= set->descriptor_count) {
2685
/* From the Vulkan spec section entitled "DescriptorSet and
2686
* Binding Assignment":
2687
*
2688
* "If the array is runtime-sized, then array elements greater
2689
* than or equal to the size of that binding in the bound
2690
* descriptor set must not be used."
2691
*
2692
* Unfortunately, the compiler isn't smart enough to figure out
2693
* when a dynamic binding isn't used so it may grab the whole
2694
* array and stick it in the binding table. In this case, it's
2695
* safe to just skip those bindings that are OOB.
2696
*/
2697
assert(binding->index < set->layout->descriptor_count);
2698
continue;
2699
}
2700
const struct anv_descriptor *desc = &set->descriptors[binding->index];
2701
2702
switch (desc->type) {
2703
case VK_DESCRIPTOR_TYPE_SAMPLER:
2704
/* Nothing for us to do here */
2705
continue;
2706
2707
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2708
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: {
2709
if (desc->image_view) {
2710
struct anv_surface_state sstate =
2711
(desc->layout == VK_IMAGE_LAYOUT_GENERAL) ?
2712
desc->image_view->planes[binding->plane].general_sampler_surface_state :
2713
desc->image_view->planes[binding->plane].optimal_sampler_surface_state;
2714
surface_state = sstate.state;
2715
assert(surface_state.alloc_size);
2716
if (need_client_mem_relocs)
2717
add_surface_state_relocs(cmd_buffer, sstate);
2718
} else {
2719
surface_state = cmd_buffer->device->null_surface_state;
2720
}
2721
break;
2722
}
2723
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
2724
assert(shader->stage == MESA_SHADER_FRAGMENT);
2725
assert(desc->image_view != NULL);
2726
if ((desc->image_view->aspect_mask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) == 0) {
2727
/* For depth and stencil input attachments, we treat them like any
2728
* old texture that a user may have bound.
2729
*/
2730
assert(desc->image_view->n_planes == 1);
2731
struct anv_surface_state sstate =
2732
(desc->layout == VK_IMAGE_LAYOUT_GENERAL) ?
2733
desc->image_view->planes[0].general_sampler_surface_state :
2734
desc->image_view->planes[0].optimal_sampler_surface_state;
2735
surface_state = sstate.state;
2736
assert(surface_state.alloc_size);
2737
if (need_client_mem_relocs)
2738
add_surface_state_relocs(cmd_buffer, sstate);
2739
} else {
2740
/* For color input attachments, we create the surface state at
2741
* vkBeginRenderPass time so that we can include aux and clear
2742
* color information.
2743
*/
2744
assert(binding->input_attachment_index < subpass->input_count);
2745
const unsigned subpass_att = binding->input_attachment_index;
2746
const unsigned att = subpass->input_attachments[subpass_att].attachment;
2747
surface_state = cmd_buffer->state.attachments[att].input.state;
2748
}
2749
break;
2750
2751
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
2752
if (desc->image_view) {
2753
struct anv_surface_state sstate = (binding->write_only)
2754
? desc->image_view->planes[binding->plane].writeonly_storage_surface_state
2755
: desc->image_view->planes[binding->plane].storage_surface_state;
2756
surface_state = sstate.state;
2757
assert(surface_state.alloc_size);
2758
if (surface_state.offset == 0) {
2759
mesa_loge("Bound a image to a descriptor where the "
2760
"descriptor does not have NonReadable "
2761
"set and the image does not have a "
2762
"corresponding SPIR-V format enum.");
2763
vk_debug_report(&cmd_buffer->device->physical->instance->vk,
2764
VK_DEBUG_REPORT_ERROR_BIT_EXT,
2765
&desc->image_view->base,
2766
__LINE__, 0, "anv",
2767
"Bound a image to a descriptor where the "
2768
"descriptor does not have NonReadable "
2769
"set and the image does not have a "
2770
"corresponding SPIR-V format enum.");
2771
}
2772
if (surface_state.offset && need_client_mem_relocs)
2773
add_surface_state_relocs(cmd_buffer, sstate);
2774
} else {
2775
surface_state = cmd_buffer->device->null_surface_state;
2776
}
2777
break;
2778
}
2779
2780
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2781
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2782
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2783
if (desc->buffer_view) {
2784
surface_state = desc->buffer_view->surface_state;
2785
assert(surface_state.alloc_size);
2786
if (need_client_mem_relocs) {
2787
add_surface_reloc(cmd_buffer, surface_state,
2788
desc->buffer_view->address);
2789
}
2790
} else {
2791
surface_state = cmd_buffer->device->null_surface_state;
2792
}
2793
break;
2794
2795
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2796
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
2797
if (desc->buffer) {
2798
/* Compute the offset within the buffer */
2799
uint32_t dynamic_offset =
2800
push->dynamic_offsets[binding->dynamic_offset_index];
2801
uint64_t offset = desc->offset + dynamic_offset;
2802
/* Clamp to the buffer size */
2803
offset = MIN2(offset, desc->buffer->size);
2804
/* Clamp the range to the buffer size */
2805
uint32_t range = MIN2(desc->range, desc->buffer->size - offset);
2806
2807
/* Align the range for consistency */
2808
if (desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
2809
range = align_u32(range, ANV_UBO_ALIGNMENT);
2810
2811
struct anv_address address =
2812
anv_address_add(desc->buffer->address, offset);
2813
2814
surface_state =
2815
anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
2816
enum isl_format format =
2817
anv_isl_format_for_descriptor_type(cmd_buffer->device,
2818
desc->type);
2819
2820
isl_surf_usage_flags_t usage =
2821
desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ?
2822
ISL_SURF_USAGE_CONSTANT_BUFFER_BIT :
2823
ISL_SURF_USAGE_STORAGE_BIT;
2824
2825
anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
2826
format, usage, address, range, 1);
2827
if (need_client_mem_relocs)
2828
add_surface_reloc(cmd_buffer, surface_state, address);
2829
} else {
2830
surface_state = cmd_buffer->device->null_surface_state;
2831
}
2832
break;
2833
}
2834
2835
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2836
if (desc->buffer_view) {
2837
surface_state = (binding->write_only)
2838
? desc->buffer_view->writeonly_storage_surface_state
2839
: desc->buffer_view->storage_surface_state;
2840
assert(surface_state.alloc_size);
2841
if (need_client_mem_relocs) {
2842
add_surface_reloc(cmd_buffer, surface_state,
2843
desc->buffer_view->address);
2844
}
2845
} else {
2846
surface_state = cmd_buffer->device->null_surface_state;
2847
}
2848
break;
2849
2850
default:
2851
assert(!"Invalid descriptor type");
2852
continue;
2853
}
2854
assert(surface_state.map);
2855
bt_map[s] = surface_state.offset + state_offset;
2856
break;
2857
}
2858
}
2859
}
2860
2861
return VK_SUCCESS;
2862
}
2863
2864
static VkResult
2865
emit_samplers(struct anv_cmd_buffer *cmd_buffer,
2866
struct anv_cmd_pipeline_state *pipe_state,
2867
struct anv_shader_bin *shader,
2868
struct anv_state *state)
2869
{
2870
struct anv_pipeline_bind_map *map = &shader->bind_map;
2871
if (map->sampler_count == 0) {
2872
*state = (struct anv_state) { 0, };
2873
return VK_SUCCESS;
2874
}
2875
2876
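/* Each SAMPLER_STATE entry is 4 dwords (16 bytes); the table itself is
 * allocated with 32-byte alignment to satisfy the sampler state pointer.
 */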
uint32_t size = map->sampler_count * 16;
2877
*state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);
2878
2879
if (state->map == NULL)
2880
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2881
2882
for (uint32_t s = 0; s < map->sampler_count; s++) {
2883
struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
2884
const struct anv_descriptor *desc =
2885
&pipe_state->descriptors[binding->set]->descriptors[binding->index];
2886
2887
if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
2888
desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2889
continue;
2890
2891
struct anv_sampler *sampler = desc->sampler;
2892
2893
/* This can happen if we have an unfilled slot since TYPE_SAMPLER
2894
* happens to be zero.
2895
*/
2896
if (sampler == NULL)
2897
continue;
2898
2899
memcpy(state->map + (s * 16),
2900
sampler->state[binding->plane], sizeof(sampler->state[0]));
2901
}
2902
2903
return VK_SUCCESS;
2904
}
2905
2906
static uint32_t
2907
flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer,
2908
struct anv_cmd_pipeline_state *pipe_state,
2909
const VkShaderStageFlags dirty,
2910
struct anv_shader_bin **shaders,
2911
uint32_t num_shaders)
2912
{
2913
VkShaderStageFlags flushed = 0;
2914
2915
VkResult result = VK_SUCCESS;
2916
for (uint32_t i = 0; i < num_shaders; i++) {
2917
if (!shaders[i])
2918
continue;
2919
2920
gl_shader_stage stage = shaders[i]->stage;
2921
VkShaderStageFlags vk_stage = mesa_to_vk_shader_stage(stage);
2922
if ((vk_stage & dirty) == 0)
2923
continue;
2924
2925
assert(stage < ARRAY_SIZE(cmd_buffer->state.samplers));
2926
result = emit_samplers(cmd_buffer, pipe_state, shaders[i],
2927
&cmd_buffer->state.samplers[stage]);
2928
if (result != VK_SUCCESS)
2929
break;
2930
2931
assert(stage < ARRAY_SIZE(cmd_buffer->state.binding_tables));
2932
result = emit_binding_table(cmd_buffer, pipe_state, shaders[i],
2933
&cmd_buffer->state.binding_tables[stage]);
2934
if (result != VK_SUCCESS)
2935
break;
2936
2937
flushed |= vk_stage;
2938
}
2939
2940
if (result != VK_SUCCESS) {
2941
assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);
2942
2943
result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
2944
if (result != VK_SUCCESS)
2945
return 0;
2946
2947
/* Re-emit state base addresses so we get the new surface state base
2948
* address before we start emitting binding tables etc.
2949
*/
2950
genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
2951
2952
/* Re-emit all active binding tables */
2953
flushed = 0;
2954
2955
for (uint32_t i = 0; i < num_shaders; i++) {
2956
if (!shaders[i])
2957
continue;
2958
2959
gl_shader_stage stage = shaders[i]->stage;
2960
2961
result = emit_samplers(cmd_buffer, pipe_state, shaders[i],
2962
&cmd_buffer->state.samplers[stage]);
2963
if (result != VK_SUCCESS) {
2964
anv_batch_set_error(&cmd_buffer->batch, result);
2965
return 0;
2966
}
2967
result = emit_binding_table(cmd_buffer, pipe_state, shaders[i],
2968
&cmd_buffer->state.binding_tables[stage]);
2969
if (result != VK_SUCCESS) {
2970
anv_batch_set_error(&cmd_buffer->batch, result);
2971
return 0;
2972
}
2973
2974
flushed |= mesa_to_vk_shader_stage(stage);
2975
}
2976
}
2977
2978
return flushed;
2979
}
2980
2981
static void
2982
cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
2983
uint32_t stages)
2984
{
2985
static const uint32_t sampler_state_opcodes[] = {
2986
[MESA_SHADER_VERTEX] = 43,
2987
[MESA_SHADER_TESS_CTRL] = 44, /* HS */
2988
[MESA_SHADER_TESS_EVAL] = 45, /* DS */
2989
[MESA_SHADER_GEOMETRY] = 46,
2990
[MESA_SHADER_FRAGMENT] = 47,
2991
[MESA_SHADER_COMPUTE] = 0,
2992
};
2993
2994
static const uint32_t binding_table_opcodes[] = {
2995
[MESA_SHADER_VERTEX] = 38,
2996
[MESA_SHADER_TESS_CTRL] = 39,
2997
[MESA_SHADER_TESS_EVAL] = 40,
2998
[MESA_SHADER_GEOMETRY] = 41,
2999
[MESA_SHADER_FRAGMENT] = 42,
3000
[MESA_SHADER_COMPUTE] = 0,
3001
};
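/* The per-stage 3DSTATE_SAMPLER_STATE_POINTERS_* and
 * 3DSTATE_BINDING_TABLE_POINTERS_* packets share the layout of their _VS
 * variants and differ only in the 3D command sub-opcode, which is why the
 * loop below emits the _VS packet templates with the sub-opcode patched
 * from the tables above.
 */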
3002
3003
anv_foreach_stage(s, stages) {
3004
assert(s < ARRAY_SIZE(binding_table_opcodes));
3005
assert(binding_table_opcodes[s] > 0);
3006
3007
if (cmd_buffer->state.samplers[s].alloc_size > 0) {
3008
anv_batch_emit(&cmd_buffer->batch,
3009
GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
3010
ssp._3DCommandSubOpcode = sampler_state_opcodes[s];
3011
ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset;
3012
}
3013
}
3014
3015
/* Always emit binding table pointers if we're asked to, since on SKL
3016
* this is what flushes push constants. */
3017
anv_batch_emit(&cmd_buffer->batch,
3018
GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
3019
btp._3DCommandSubOpcode = binding_table_opcodes[s];
3020
btp.PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset;
3021
}
3022
}
3023
}
3024
3025
static struct anv_address
3026
get_push_range_address(struct anv_cmd_buffer *cmd_buffer,
3027
const struct anv_shader_bin *shader,
3028
const struct anv_push_range *range)
3029
{
3030
struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
3031
switch (range->set) {
3032
case ANV_DESCRIPTOR_SET_DESCRIPTORS: {
3033
/* This is a descriptor set buffer, so the set index is actually given
 * by range->index rather than range->set.  (Yes, that's confusing.)
 */
3037
struct anv_descriptor_set *set =
3038
gfx_state->base.descriptors[range->index];
3039
return anv_descriptor_set_address(set);
3040
}
3041
3042
case ANV_DESCRIPTOR_SET_PUSH_CONSTANTS: {
3043
if (gfx_state->base.push_constants_state.alloc_size == 0) {
3044
gfx_state->base.push_constants_state =
3045
anv_cmd_buffer_gfx_push_constants(cmd_buffer);
3046
}
3047
return (struct anv_address) {
3048
.bo = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
3049
.offset = gfx_state->base.push_constants_state.offset,
3050
};
3051
}
3052
3053
case ANV_DESCRIPTOR_SET_SHADER_CONSTANTS:
3054
return (struct anv_address) {
3055
.bo = cmd_buffer->device->instruction_state_pool.block_pool.bo,
3056
.offset = shader->kernel.offset +
3057
shader->prog_data->const_data_offset,
3058
};
3059
3060
default: {
3061
assert(range->set < MAX_SETS);
3062
struct anv_descriptor_set *set =
3063
gfx_state->base.descriptors[range->set];
3064
const struct anv_descriptor *desc =
3065
&set->descriptors[range->index];
3066
3067
if (desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
3068
if (desc->buffer_view)
3069
return desc->buffer_view->address;
3070
} else {
3071
assert(desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
3072
if (desc->buffer) {
3073
const struct anv_push_constants *push =
3074
&gfx_state->base.push_constants;
3075
uint32_t dynamic_offset =
3076
push->dynamic_offsets[range->dynamic_offset_index];
3077
return anv_address_add(desc->buffer->address,
3078
desc->offset + dynamic_offset);
3079
}
3080
}
3081
3082
/* For NULL UBOs, we just return an address in the workaround BO. We do
3083
* writes to it for workarounds but always at the bottom. The higher
3084
* bytes should be all zeros.
3085
*/
3086
assert(range->length * 32 <= 2048);
3087
return (struct anv_address) {
3088
.bo = cmd_buffer->device->workaround_bo,
3089
.offset = 1024,
3090
};
3091
}
3092
}
3093
}
3094
3095
3096
/** Returns the size in bytes of the bound buffer
 *
 * The returned size is relative to the start of the buffer, not the start
 * of the push range, and it may be smaller than
 *
 *    (range->start + range->length) * 32;
 */
3103
static uint32_t
3104
get_push_range_bound_size(struct anv_cmd_buffer *cmd_buffer,
3105
const struct anv_shader_bin *shader,
3106
const struct anv_push_range *range)
3107
{
3108
assert(shader->stage != MESA_SHADER_COMPUTE);
3109
const struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
3110
switch (range->set) {
3111
case ANV_DESCRIPTOR_SET_DESCRIPTORS: {
3112
struct anv_descriptor_set *set =
3113
gfx_state->base.descriptors[range->index];
3114
assert(range->start * 32 < set->desc_mem.alloc_size);
3115
assert((range->start + range->length) * 32 <= set->desc_mem.alloc_size);
3116
return set->desc_mem.alloc_size;
3117
}
3118
3119
case ANV_DESCRIPTOR_SET_PUSH_CONSTANTS:
3120
return (range->start + range->length) * 32;
3121
3122
case ANV_DESCRIPTOR_SET_SHADER_CONSTANTS:
3123
return ALIGN(shader->prog_data->const_data_size, ANV_UBO_ALIGNMENT);
3124
3125
default: {
3126
assert(range->set < MAX_SETS);
3127
struct anv_descriptor_set *set =
3128
gfx_state->base.descriptors[range->set];
3129
const struct anv_descriptor *desc =
3130
&set->descriptors[range->index];
3131
3132
if (desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
3133
if (!desc->buffer_view)
3134
return 0;
3135
3136
if (range->start * 32 > desc->buffer_view->range)
3137
return 0;
3138
3139
return desc->buffer_view->range;
3140
} else {
3141
if (!desc->buffer)
3142
return 0;
3143
3144
assert(desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
3145
/* Compute the offset within the buffer */
3146
const struct anv_push_constants *push =
3147
&gfx_state->base.push_constants;
3148
uint32_t dynamic_offset =
3149
push->dynamic_offsets[range->dynamic_offset_index];
3150
uint64_t offset = desc->offset + dynamic_offset;
3151
/* Clamp to the buffer size */
3152
offset = MIN2(offset, desc->buffer->size);
3153
/* Clamp the range to the buffer size */
3154
uint32_t bound_range = MIN2(desc->range, desc->buffer->size - offset);
3155
3156
/* Align the range for consistency */
3157
bound_range = align_u32(bound_range, ANV_UBO_ALIGNMENT);
3158
3159
return bound_range;
3160
}
3161
}
3162
}
3163
}
3164
3165
static void
3166
cmd_buffer_emit_push_constant(struct anv_cmd_buffer *cmd_buffer,
3167
gl_shader_stage stage,
3168
struct anv_address *buffers,
3169
unsigned buffer_count)
3170
{
3171
const struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
3172
const struct anv_graphics_pipeline *pipeline = gfx_state->pipeline;
3173
3174
static const uint32_t push_constant_opcodes[] = {
3175
[MESA_SHADER_VERTEX] = 21,
3176
[MESA_SHADER_TESS_CTRL] = 25, /* HS */
3177
[MESA_SHADER_TESS_EVAL] = 26, /* DS */
3178
[MESA_SHADER_GEOMETRY] = 22,
3179
[MESA_SHADER_FRAGMENT] = 23,
3180
[MESA_SHADER_COMPUTE] = 0,
3181
};
3182
3183
assert(stage < ARRAY_SIZE(push_constant_opcodes));
3184
assert(push_constant_opcodes[stage] > 0);
3185
3186
anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
3187
c._3DCommandSubOpcode = push_constant_opcodes[stage];
3188
3189
if (anv_pipeline_has_stage(pipeline, stage)) {
3190
const struct anv_pipeline_bind_map *bind_map =
3191
&pipeline->shaders[stage]->bind_map;
3192
3193
#if GFX_VER >= 9
3194
/* This field exists since Gfx8. However, the Broadwell PRM says:
3195
*
3196
* "Constant Buffer Object Control State must be always programmed
3197
* to zero."
3198
*
3199
* This restriction does not exist on any newer platforms.
3200
*
3201
* We only have one MOCS field for the whole packet, not one per
3202
* buffer. We could go out of our way here to walk over all of the
3203
* buffers and see if any of them are used externally and use the
3204
* external MOCS. However, the notion that someone would use the
3205
* same bit of memory for both scanout and a UBO is nuts. Let's not
3206
* bother and assume it's all internal.
3207
*/
3208
c.MOCS = isl_mocs(&cmd_buffer->device->isl_dev, 0, false);
3209
#endif
3210
3211
#if GFX_VERx10 >= 75
3212
/* The Skylake PRM contains the following restriction:
3213
*
3214
* "The driver must ensure The following case does not occur
3215
* without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
3216
* buffer 3 read length equal to zero committed followed by a
3217
* 3DSTATE_CONSTANT_* with buffer 0 read length not equal to
3218
* zero committed."
3219
*
3220
* To avoid this, we program the buffers in the highest slots.
3221
* This way, slot 0 is only used if slot 3 is also used.
3222
*/
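/* Worked example: with buffer_count == 2 the shift below is 2, so push
 * ranges 0 and 1 land in constant buffer slots 2 and 3 while slots 0 and 1
 * stay unused, which satisfies the rule quoted above.
 */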
3223
assert(buffer_count <= 4);
3224
const unsigned shift = 4 - buffer_count;
3225
for (unsigned i = 0; i < buffer_count; i++) {
3226
const struct anv_push_range *range = &bind_map->push_ranges[i];
3227
3228
/* At this point we only have non-empty ranges */
3229
assert(range->length > 0);
3230
3231
/* For Ivy Bridge, make sure we only set the first range (actual
3232
* push constants)
3233
*/
3234
assert((GFX_VERx10 >= 75) || i == 0);
3235
3236
c.ConstantBody.ReadLength[i + shift] = range->length;
3237
c.ConstantBody.Buffer[i + shift] =
3238
anv_address_add(buffers[i], range->start * 32);
3239
}
3240
#else
3241
/* For Ivy Bridge, push constants are relative to dynamic state
3242
* base address and we only ever push actual push constants.
3243
*/
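/* Leaving ConstantBody.Buffer[0].bo as NULL below makes the address pack
 * as a plain offset with no relocation, which is what a pointer relative
 * to Dynamic State Base Address needs here.
 */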
3244
if (bind_map->push_ranges[0].length > 0) {
3245
assert(buffer_count == 1);
3246
assert(bind_map->push_ranges[0].set ==
3247
ANV_DESCRIPTOR_SET_PUSH_CONSTANTS);
3248
assert(buffers[0].bo ==
3249
cmd_buffer->device->dynamic_state_pool.block_pool.bo);
3250
c.ConstantBody.ReadLength[0] = bind_map->push_ranges[0].length;
3251
c.ConstantBody.Buffer[0].bo = NULL;
3252
c.ConstantBody.Buffer[0].offset = buffers[0].offset;
3253
}
3254
assert(bind_map->push_ranges[1].length == 0);
3255
assert(bind_map->push_ranges[2].length == 0);
3256
assert(bind_map->push_ranges[3].length == 0);
3257
#endif
3258
}
3259
}
3260
}
3261
3262
#if GFX_VER >= 12
3263
static void
3264
cmd_buffer_emit_push_constant_all(struct anv_cmd_buffer *cmd_buffer,
3265
uint32_t shader_mask,
3266
struct anv_address *buffers,
3267
uint32_t buffer_count)
3268
{
3269
if (buffer_count == 0) {
3270
anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_ALL), c) {
3271
c.ShaderUpdateEnable = shader_mask;
3272
c.MOCS = isl_mocs(&cmd_buffer->device->isl_dev, 0, false);
3273
}
3274
return;
3275
}
3276
3277
const struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
3278
const struct anv_graphics_pipeline *pipeline = gfx_state->pipeline;
3279
3280
static const UNUSED uint32_t push_constant_opcodes[] = {
3281
[MESA_SHADER_VERTEX] = 21,
3282
[MESA_SHADER_TESS_CTRL] = 25, /* HS */
3283
[MESA_SHADER_TESS_EVAL] = 26, /* DS */
3284
[MESA_SHADER_GEOMETRY] = 22,
3285
[MESA_SHADER_FRAGMENT] = 23,
3286
[MESA_SHADER_COMPUTE] = 0,
3287
};
3288
3289
gl_shader_stage stage = vk_to_mesa_shader_stage(shader_mask);
3290
assert(stage < ARRAY_SIZE(push_constant_opcodes));
3291
assert(push_constant_opcodes[stage] > 0);
3292
3293
const struct anv_pipeline_bind_map *bind_map =
3294
&pipeline->shaders[stage]->bind_map;
3295
3296
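/* 3DSTATE_CONSTANT_ALL consists of a 2-dword header followed by one
 * 2-dword CONSTANT_ALL_DATA entry (pointer + read length) per buffer
 * selected in PointerBufferMask, hence the num_dwords computed below.
 */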
uint32_t *dw;
3297
const uint32_t buffer_mask = (1 << buffer_count) - 1;
3298
const uint32_t num_dwords = 2 + 2 * buffer_count;
3299
3300
dw = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
3301
GENX(3DSTATE_CONSTANT_ALL),
3302
.ShaderUpdateEnable = shader_mask,
3303
.PointerBufferMask = buffer_mask,
3304
.MOCS = isl_mocs(&cmd_buffer->device->isl_dev, 0, false));
3305
3306
for (int i = 0; i < buffer_count; i++) {
3307
const struct anv_push_range *range = &bind_map->push_ranges[i];
3308
GENX(3DSTATE_CONSTANT_ALL_DATA_pack)(
3309
&cmd_buffer->batch, dw + 2 + i * 2,
3310
&(struct GENX(3DSTATE_CONSTANT_ALL_DATA)) {
3311
.PointerToConstantBuffer =
3312
anv_address_add(buffers[i], range->start * 32),
3313
.ConstantBufferReadLength = range->length,
3314
});
3315
}
3316
}
3317
#endif
3318
3319
static void
3320
cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer,
3321
VkShaderStageFlags dirty_stages)
3322
{
3323
VkShaderStageFlags flushed = 0;
3324
struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
3325
const struct anv_graphics_pipeline *pipeline = gfx_state->pipeline;
3326
3327
#if GFX_VER >= 12
3328
uint32_t nobuffer_stages = 0;
3329
#endif
3330
3331
/* Compute robust pushed register access mask for each stage. */
3332
if (cmd_buffer->device->robust_buffer_access) {
3333
anv_foreach_stage(stage, dirty_stages) {
3334
if (!anv_pipeline_has_stage(pipeline, stage))
3335
continue;
3336
3337
const struct anv_shader_bin *shader = pipeline->shaders[stage];
3338
const struct anv_pipeline_bind_map *bind_map = &shader->bind_map;
3339
struct anv_push_constants *push = &gfx_state->base.push_constants;
3340
3341
push->push_reg_mask[stage] = 0;
3342
/* Start of the current range in the shader, relative to the start of
3343
* push constants in the shader.
3344
*/
3345
unsigned range_start_reg = 0;
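/* Note: a push "register" here is one 32-byte GRF; range->start and
 * range->length are in those units, which is why byte sizes are divided
 * by 32 below.  Registers left out of push_reg_mask are presumably
 * treated as zero by the compiled shader to implement robustBufferAccess
 * for pushed UBO ranges.
 */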
3346
for (unsigned i = 0; i < 4; i++) {
3347
const struct anv_push_range *range = &bind_map->push_ranges[i];
3348
if (range->length == 0)
3349
continue;
3350
3351
unsigned bound_size =
3352
get_push_range_bound_size(cmd_buffer, shader, range);
3353
if (bound_size >= range->start * 32) {
3354
unsigned bound_regs =
3355
MIN2(DIV_ROUND_UP(bound_size, 32) - range->start,
3356
range->length);
3357
assert(range_start_reg + bound_regs <= 64);
3358
push->push_reg_mask[stage] |= BITFIELD64_RANGE(range_start_reg,
3359
bound_regs);
3360
}
3361
3362
cmd_buffer->state.push_constants_dirty |=
3363
mesa_to_vk_shader_stage(stage);
3364
3365
range_start_reg += range->length;
3366
}
3367
}
3368
}
3369
3370
/* Resets the push constant state so that we allocate a new one if
3371
* needed.
3372
*/
3373
gfx_state->base.push_constants_state = ANV_STATE_NULL;
3374
3375
anv_foreach_stage(stage, dirty_stages) {
3376
unsigned buffer_count = 0;
3377
flushed |= mesa_to_vk_shader_stage(stage);
3378
UNUSED uint32_t max_push_range = 0;
3379
3380
struct anv_address buffers[4] = {};
3381
if (anv_pipeline_has_stage(pipeline, stage)) {
3382
const struct anv_shader_bin *shader = pipeline->shaders[stage];
3383
const struct anv_pipeline_bind_map *bind_map = &shader->bind_map;
3384
3385
/* We have to gather buffer addresses as a second step because the
3386
* loop above puts data into the push constant area and the call to
3387
* get_push_range_address is what locks our push constants and copies
3388
* them into the actual GPU buffer. If we did the two loops at the
3389
* same time, we'd risk only having some of the sizes in the push
3390
* constant buffer when we did the copy.
3391
*/
3392
for (unsigned i = 0; i < 4; i++) {
3393
const struct anv_push_range *range = &bind_map->push_ranges[i];
3394
if (range->length == 0)
3395
break;
3396
3397
buffers[i] = get_push_range_address(cmd_buffer, shader, range);
3398
max_push_range = MAX2(max_push_range, range->length);
3399
buffer_count++;
3400
}
3401
3402
/* We have at most 4 buffers but they should be tightly packed */
3403
for (unsigned i = buffer_count; i < 4; i++)
3404
assert(bind_map->push_ranges[i].length == 0);
3405
}
3406
3407
#if GFX_VER >= 12
3408
/* If this stage doesn't have any push constants, emit it later in a
3409
* single CONSTANT_ALL packet.
3410
*/
3411
if (buffer_count == 0) {
3412
nobuffer_stages |= 1 << stage;
3413
continue;
3414
}
3415
3416
/* The Constant Buffer Read Length field from 3DSTATE_CONSTANT_ALL
3417
* contains only 5 bits, so we can only use it for buffers smaller than
3418
* 32.
3419
*/
3420
if (max_push_range < 32) {
3421
cmd_buffer_emit_push_constant_all(cmd_buffer, 1 << stage,
3422
buffers, buffer_count);
3423
continue;
3424
}
3425
#endif
3426
3427
cmd_buffer_emit_push_constant(cmd_buffer, stage, buffers, buffer_count);
3428
}
3429
3430
#if GFX_VER >= 12
3431
if (nobuffer_stages)
3432
cmd_buffer_emit_push_constant_all(cmd_buffer, nobuffer_stages, NULL, 0);
3433
#endif
3434
3435
cmd_buffer->state.push_constants_dirty &= ~flushed;
3436
}
3437
3438
static void
3439
cmd_buffer_emit_clip(struct anv_cmd_buffer *cmd_buffer)
3440
{
3441
const uint32_t clip_states =
3442
#if GFX_VER <= 7
3443
ANV_CMD_DIRTY_DYNAMIC_FRONT_FACE |
3444
ANV_CMD_DIRTY_DYNAMIC_CULL_MODE |
3445
#endif
3446
ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY |
3447
ANV_CMD_DIRTY_DYNAMIC_VIEWPORT |
3448
ANV_CMD_DIRTY_PIPELINE;
3449
3450
if ((cmd_buffer->state.gfx.dirty & clip_states) == 0)
3451
return;
3452
3453
/* Take dynamic primitive topology into account with
 * 3DSTATE_CLIP::ViewportXYClipTestEnable
 */
bool xy_clip_test_enable = false;
3457
3458
if (cmd_buffer->state.gfx.pipeline->dynamic_states &
3459
ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY) {
3460
VkPrimitiveTopology primitive_topology =
3461
cmd_buffer->state.gfx.dynamic.primitive_topology;
3462
3463
VkPolygonMode dynamic_raster_mode =
3464
genX(raster_polygon_mode)(cmd_buffer->state.gfx.pipeline,
3465
primitive_topology);
3466
3467
xy_clip_test_enable = (dynamic_raster_mode == VK_POLYGON_MODE_FILL);
3468
}
3469
3470
#if GFX_VER <= 7
3471
const struct anv_dynamic_state *d = &cmd_buffer->state.gfx.dynamic;
3472
#endif
3473
struct GENX(3DSTATE_CLIP) clip = {
3474
GENX(3DSTATE_CLIP_header),
3475
#if GFX_VER <= 7
3476
.FrontWinding = genX(vk_to_intel_front_face)[d->front_face],
3477
.CullMode = genX(vk_to_intel_cullmode)[d->cull_mode],
3478
#endif
3479
.ViewportXYClipTestEnable = xy_clip_test_enable,
3480
};
3481
uint32_t dwords[GENX(3DSTATE_CLIP_length)];
3482
3483
struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
3484
const struct brw_vue_prog_data *last =
3485
anv_pipeline_get_last_vue_prog_data(pipeline);
3486
if (last->vue_map.slots_valid & VARYING_BIT_VIEWPORT) {
3487
clip.MaximumVPIndex =
3488
cmd_buffer->state.gfx.dynamic.viewport.count > 0 ?
3489
cmd_buffer->state.gfx.dynamic.viewport.count - 1 : 0;
3490
}
3491
3492
GENX(3DSTATE_CLIP_pack)(NULL, dwords, &clip);
3493
anv_batch_emit_merge(&cmd_buffer->batch, dwords,
3494
pipeline->gfx7.clip);
3495
}
3496
3497
static void
3498
cmd_buffer_emit_streamout(struct anv_cmd_buffer *cmd_buffer)
3499
{
3500
const struct anv_dynamic_state *d = &cmd_buffer->state.gfx.dynamic;
3501
struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
3502
3503
#if GFX_VER == 7
3504
# define streamout_state_dw pipeline->gfx7.streamout_state
3505
#else
3506
# define streamout_state_dw pipeline->gfx8.streamout_state
3507
#endif
3508
3509
uint32_t dwords[GENX(3DSTATE_STREAMOUT_length)];
3510
3511
struct GENX(3DSTATE_STREAMOUT) so = {
3512
GENX(3DSTATE_STREAMOUT_header),
3513
.RenderingDisable = d->raster_discard,
3514
};
3515
GENX(3DSTATE_STREAMOUT_pack)(NULL, dwords, &so);
3516
anv_batch_emit_merge(&cmd_buffer->batch, dwords, streamout_state_dw);
3517
}
3518
3519
void
3520
genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
3521
{
3522
struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
3523
uint32_t *p;
3524
3525
assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
3526
3527
genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->base.l3_config);
3528
3529
genX(cmd_buffer_emit_hashing_mode)(cmd_buffer, UINT_MAX, UINT_MAX, 1);
3530
3531
genX(flush_pipeline_select_3d)(cmd_buffer);
3532
3533
/* Apply any pending pipeline flushes we may have. We want to apply them
3534
* now because, if any of those flushes are for things like push constants,
3535
* the GPU will read the state at weird times.
3536
*/
3537
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
3538
3539
uint32_t vb_emit = cmd_buffer->state.gfx.vb_dirty & pipeline->vb_used;
3540
if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE)
3541
vb_emit |= pipeline->vb_used;
3542
3543
if (vb_emit) {
3544
const uint32_t num_buffers = __builtin_popcount(vb_emit);
3545
const uint32_t num_dwords = 1 + num_buffers * 4;
3546
3547
p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
3548
GENX(3DSTATE_VERTEX_BUFFERS));
3549
uint32_t i = 0;
3550
u_foreach_bit(vb, vb_emit) {
3551
struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
3552
uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;
3553
3554
/* If dynamic, use stride/size from vertex binding, otherwise use
3555
* stride/size that was setup in the pipeline object.
3556
*/
3557
bool dynamic_stride = cmd_buffer->state.gfx.dynamic.dyn_vbo_stride;
3558
bool dynamic_size = cmd_buffer->state.gfx.dynamic.dyn_vbo_size;
3559
3560
struct GENX(VERTEX_BUFFER_STATE) state;
3561
if (buffer) {
3562
uint32_t stride = dynamic_stride ?
3563
cmd_buffer->state.vertex_bindings[vb].stride : pipeline->vb[vb].stride;
3564
/* From the Vulkan spec (vkCmdBindVertexBuffers2EXT):
3565
*
3566
* "If pname:pSizes is not NULL then pname:pSizes[i] specifies
3567
* the bound size of the vertex buffer starting from the corresponding
3568
* elements of pname:pBuffers[i] plus pname:pOffsets[i]."
3569
*/
3570
UNUSED uint32_t size = dynamic_size ?
3571
cmd_buffer->state.vertex_bindings[vb].size : buffer->size - offset;
3572
3573
state = (struct GENX(VERTEX_BUFFER_STATE)) {
3574
.VertexBufferIndex = vb,
3575
3576
.MOCS = anv_mocs(cmd_buffer->device, buffer->address.bo,
3577
ISL_SURF_USAGE_VERTEX_BUFFER_BIT),
3578
#if GFX_VER <= 7
3579
.BufferAccessType = pipeline->vb[vb].instanced ? INSTANCEDATA : VERTEXDATA,
3580
.InstanceDataStepRate = pipeline->vb[vb].instance_divisor,
3581
#endif
3582
.AddressModifyEnable = true,
3583
.BufferPitch = stride,
3584
.BufferStartingAddress = anv_address_add(buffer->address, offset),
3585
.NullVertexBuffer = offset >= buffer->size,
3586
#if GFX_VER >= 12
3587
.L3BypassDisable = true,
3588
#endif
3589
3590
#if GFX_VER >= 8
3591
.BufferSize = size,
3592
#else
3593
/* XXX: to handle dynamic offset for older gens we might want
3594
* to modify Endaddress, but there are issues when doing so:
3595
*
3596
* https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/7439
3597
*/
3598
.EndAddress = anv_address_add(buffer->address, buffer->size - 1),
3599
#endif
3600
};
3601
} else {
3602
state = (struct GENX(VERTEX_BUFFER_STATE)) {
3603
.VertexBufferIndex = vb,
3604
.NullVertexBuffer = true,
3605
};
3606
}
3607
3608
#if GFX_VER >= 8 && GFX_VER <= 9
3609
genX(cmd_buffer_set_binding_for_gfx8_vb_flush)(cmd_buffer, vb,
3610
state.BufferStartingAddress,
3611
state.BufferSize);
3612
#endif
3613
3614
GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
3615
i++;
3616
}
3617
}
3618
3619
cmd_buffer->state.gfx.vb_dirty &= ~vb_emit;
3620
3621
uint32_t descriptors_dirty = cmd_buffer->state.descriptors_dirty &
3622
pipeline->active_stages;
3623
if (!cmd_buffer->state.gfx.dirty && !descriptors_dirty &&
3624
!cmd_buffer->state.push_constants_dirty)
3625
return;
3626
3627
if ((cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_XFB_ENABLE) ||
3628
(GFX_VER == 7 && (cmd_buffer->state.gfx.dirty &
3629
ANV_CMD_DIRTY_PIPELINE))) {
3630
/* We don't need any per-buffer dirty tracking because you're not
3631
* allowed to bind different XFB buffers while XFB is enabled.
3632
*/
3633
for (unsigned idx = 0; idx < MAX_XFB_BUFFERS; idx++) {
3634
struct anv_xfb_binding *xfb = &cmd_buffer->state.xfb_bindings[idx];
3635
anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_SO_BUFFER), sob) {
3636
#if GFX_VER < 12
3637
sob.SOBufferIndex = idx;
3638
#else
3639
sob._3DCommandOpcode = 0;
3640
sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + idx;
3641
#endif
3642
3643
if (cmd_buffer->state.xfb_enabled && xfb->buffer && xfb->size != 0) {
3644
sob.MOCS = anv_mocs(cmd_buffer->device, xfb->buffer->address.bo, 0);
3645
sob.SurfaceBaseAddress = anv_address_add(xfb->buffer->address,
3646
xfb->offset);
3647
#if GFX_VER >= 8
3648
sob.SOBufferEnable = true;
3649
sob.StreamOffsetWriteEnable = false;
3650
/* Size is in DWords - 1 */
3651
sob.SurfaceSize = DIV_ROUND_UP(xfb->size, 4) - 1;
3652
#else
3653
/* We don't have SOBufferEnable in 3DSTATE_SO_BUFFER on Gfx7 so
3654
* we trust in SurfaceEndAddress = SurfaceBaseAddress = 0 (the
3655
* default for an empty SO_BUFFER packet) to disable them.
3656
*/
3657
sob.SurfacePitch = pipeline->gfx7.xfb_bo_pitch[idx];
3658
sob.SurfaceEndAddress = anv_address_add(xfb->buffer->address,
3659
xfb->offset + xfb->size);
3660
#endif
3661
}
3662
}
3663
}
3664
3665
/* CNL and later require a CS stall after 3DSTATE_SO_BUFFER */
3666
if (GFX_VER >= 10) {
3667
anv_add_pending_pipe_bits(cmd_buffer,
3668
ANV_PIPE_CS_STALL_BIT,
3669
"after 3DSTATE_SO_BUFFER call");
3670
}
3671
}
3672
3673
if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE) {
3674
anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->base.batch);
3675
3676
/* Remove from dynamic state emission everything that is baked into
 * the pipeline.
 */
3679
cmd_buffer->state.gfx.dirty &= ~pipeline->static_state_mask;
3680
3681
/* If the pipeline changed, we may need to re-allocate push constant
3682
* space in the URB.
3683
*/
3684
cmd_buffer_alloc_push_constants(cmd_buffer);
3685
}
3686
3687
if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE)
3688
cmd_buffer->state.gfx.primitive_topology = pipeline->topology;
3689
3690
#if GFX_VER <= 7
3691
if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
3692
cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
3693
/* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
3694
*
3695
* "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
3696
* stall needs to be sent just prior to any 3DSTATE_VS,
3697
* 3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
3698
* 3DSTATE_BINDING_TABLE_POINTER_VS,
3699
* 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one
3700
* PIPE_CONTROL needs to be sent before any combination of VS
3701
* associated 3DSTATE."
3702
*/
3703
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
3704
pc.DepthStallEnable = true;
3705
pc.PostSyncOperation = WriteImmediateData;
3706
pc.Address = cmd_buffer->device->workaround_address;
3707
anv_debug_dump_pc(pc);
3708
}
3709
}
3710
#endif
3711
3712
/* Render targets live in the same binding table as fragment descriptors */
3713
if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_RENDER_TARGETS)
3714
descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
3715
3716
/* We emit the binding tables and sampler tables first, then emit push
3717
* constants and then finally emit binding table and sampler table
3718
* pointers. It has to happen in this order, since emitting the binding
3719
* tables may change the push constants (in case of storage images). After
3720
* emitting push constants, on SKL+ we have to emit the corresponding
3721
* 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
3722
*/
3723
uint32_t dirty = 0;
3724
if (descriptors_dirty) {
3725
dirty = flush_descriptor_sets(cmd_buffer,
3726
&cmd_buffer->state.gfx.base,
3727
descriptors_dirty,
3728
pipeline->shaders,
3729
ARRAY_SIZE(pipeline->shaders));
3730
cmd_buffer->state.descriptors_dirty &= ~dirty;
3731
}
3732
3733
if (dirty || cmd_buffer->state.push_constants_dirty) {
3734
/* Because we're pushing UBOs, we have to push whenever either
3735
* descriptors or push constants is dirty.
3736
*/
3737
dirty |= cmd_buffer->state.push_constants_dirty;
3738
dirty &= ANV_STAGE_MASK & VK_SHADER_STAGE_ALL_GRAPHICS;
3739
cmd_buffer_flush_push_constants(cmd_buffer, dirty);
3740
}
3741
3742
if (dirty)
3743
cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
3744
3745
cmd_buffer_emit_clip(cmd_buffer);
3746
3747
if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_RASTERIZER_DISCARD_ENABLE)
3748
cmd_buffer_emit_streamout(cmd_buffer);
3749
3750
if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
3751
gfx8_cmd_buffer_emit_viewport(cmd_buffer);
3752
3753
if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT |
3754
ANV_CMD_DIRTY_PIPELINE)) {
3755
gfx8_cmd_buffer_emit_depth_viewport(cmd_buffer,
3756
pipeline->depth_clamp_enable);
3757
}
3758
3759
if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_DYNAMIC_SCISSOR |
3760
ANV_CMD_DIRTY_RENDER_TARGETS))
3761
gfx7_cmd_buffer_emit_scissor(cmd_buffer);
3762
3763
genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
3764
}
3765
3766
static void
3767
emit_vertex_bo(struct anv_cmd_buffer *cmd_buffer,
3768
struct anv_address addr,
3769
uint32_t size, uint32_t index)
3770
{
3771
uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
3772
GENX(3DSTATE_VERTEX_BUFFERS));
3773
3774
GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
3775
&(struct GENX(VERTEX_BUFFER_STATE)) {
3776
.VertexBufferIndex = index,
3777
.AddressModifyEnable = true,
3778
.BufferPitch = 0,
3779
.MOCS = addr.bo ? anv_mocs(cmd_buffer->device, addr.bo,
3780
ISL_SURF_USAGE_VERTEX_BUFFER_BIT) : 0,
3781
.NullVertexBuffer = size == 0,
3782
#if GFX_VER >= 12
3783
.L3BypassDisable = true,
3784
#endif
3785
#if (GFX_VER >= 8)
3786
.BufferStartingAddress = addr,
3787
.BufferSize = size
3788
#else
3789
.BufferStartingAddress = addr,
3790
.EndAddress = anv_address_add(addr, size),
3791
#endif
3792
});
3793
3794
genX(cmd_buffer_set_binding_for_gfx8_vb_flush)(cmd_buffer,
3795
index, addr, size);
3796
}
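/* The helpers below feed gl_BaseVertex/gl_BaseInstance and gl_DrawID to
 * the vertex shader as hidden vertex attributes: a tiny buffer holding
 * the values is bound at ANV_SVGS_VB_INDEX / ANV_DRAWID_VB_INDEX and
 * fetched by the VF stage like regular vertex data.
 */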
3797
3798
static void
3799
emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
3800
struct anv_address addr)
3801
{
3802
emit_vertex_bo(cmd_buffer, addr, addr.bo ? 8 : 0, ANV_SVGS_VB_INDEX);
3803
}
3804
3805
static void
3806
emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
3807
uint32_t base_vertex, uint32_t base_instance)
3808
{
3809
if (base_vertex == 0 && base_instance == 0) {
3810
emit_base_vertex_instance_bo(cmd_buffer, ANV_NULL_ADDRESS);
3811
} else {
3812
struct anv_state id_state =
3813
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);
3814
3815
((uint32_t *)id_state.map)[0] = base_vertex;
3816
((uint32_t *)id_state.map)[1] = base_instance;
3817
3818
struct anv_address addr = {
3819
.bo = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
3820
.offset = id_state.offset,
3821
};
3822
3823
emit_base_vertex_instance_bo(cmd_buffer, addr);
3824
}
3825
}
3826
3827
static void
3828
emit_draw_index(struct anv_cmd_buffer *cmd_buffer, uint32_t draw_index)
3829
{
3830
struct anv_state state =
3831
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 4, 4);
3832
3833
((uint32_t *)state.map)[0] = draw_index;
3834
3835
struct anv_address addr = {
3836
.bo = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
3837
.offset = state.offset,
3838
};
3839
3840
emit_vertex_bo(cmd_buffer, addr, 4, ANV_DRAWID_VB_INDEX);
3841
}
3842
3843
static void
3844
update_dirty_vbs_for_gfx8_vb_flush(struct anv_cmd_buffer *cmd_buffer,
3845
uint32_t access_type)
3846
{
3847
struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
3848
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
3849
3850
uint64_t vb_used = pipeline->vb_used;
3851
if (vs_prog_data->uses_firstvertex ||
3852
vs_prog_data->uses_baseinstance)
3853
vb_used |= 1ull << ANV_SVGS_VB_INDEX;
3854
if (vs_prog_data->uses_drawid)
3855
vb_used |= 1ull << ANV_DRAWID_VB_INDEX;
3856
3857
genX(cmd_buffer_update_dirty_vbs_for_gfx8_vb_flush)(cmd_buffer,
3858
access_type == RANDOM,
3859
vb_used);
3860
}
3861
3862
ALWAYS_INLINE static void
3863
cmd_buffer_emit_vertex_constants_and_flush(struct anv_cmd_buffer *cmd_buffer,
3864
const struct brw_vs_prog_data *vs_prog_data,
3865
uint32_t base_vertex,
3866
uint32_t base_instance,
3867
uint32_t draw_id,
3868
bool force_flush)
3869
{
3870
bool emitted = false;
3871
if (vs_prog_data->uses_firstvertex ||
3872
vs_prog_data->uses_baseinstance) {
3873
emit_base_vertex_instance(cmd_buffer, base_vertex, base_instance);
3874
emitted = true;
3875
}
3876
if (vs_prog_data->uses_drawid) {
3877
emit_draw_index(cmd_buffer, draw_id);
3878
emitted = true;
3879
}
3880
/* Emitting draw index or vertex index BOs may result in needing
3881
* additional VF cache flushes.
3882
*/
3883
if (emitted || force_flush)
3884
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
3885
}
3886
3887
void genX(CmdDraw)(
3888
VkCommandBuffer commandBuffer,
3889
uint32_t vertexCount,
3890
uint32_t instanceCount,
3891
uint32_t firstVertex,
3892
uint32_t firstInstance)
3893
{
3894
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
3895
struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
3896
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
3897
3898
if (anv_batch_has_error(&cmd_buffer->batch))
3899
return;
3900
3901
const uint32_t count = (vertexCount *
3902
instanceCount *
3903
(pipeline->use_primitive_replication ?
3904
1 : anv_subpass_view_count(cmd_buffer->state.subpass)));
3905
anv_measure_snapshot(cmd_buffer,
3906
INTEL_SNAPSHOT_DRAW,
3907
"draw", count);
3908
3909
genX(cmd_buffer_flush_state)(cmd_buffer);
3910
3911
if (cmd_buffer->state.conditional_render_enabled)
3912
genX(cmd_emit_conditional_render_predicate)(cmd_buffer);
3913
3914
cmd_buffer_emit_vertex_constants_and_flush(cmd_buffer, vs_prog_data,
3915
firstVertex, firstInstance, 0,
3916
true);
3917
3918
/* Our implementation of VK_KHR_multiview uses instancing to draw the
3919
* different views. We need to multiply instanceCount by the view count.
3920
*/
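/* For example, a 2-view subpass with instanceCount == 3 results in
 * InstanceCount == 6 in the 3DPRIMITIVE below; the view index and the
 * application-visible instance index are then recovered from the hardware
 * instance ID on the shader side (see anv_nir_lower_multiview.c for the
 * exact mapping).
 */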
3921
if (!pipeline->use_primitive_replication)
3922
instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
3923
3924
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
3925
prim.PredicateEnable = cmd_buffer->state.conditional_render_enabled;
3926
prim.VertexAccessType = SEQUENTIAL;
3927
prim.PrimitiveTopologyType = cmd_buffer->state.gfx.primitive_topology;
3928
prim.VertexCountPerInstance = vertexCount;
3929
prim.StartVertexLocation = firstVertex;
3930
prim.InstanceCount = instanceCount;
3931
prim.StartInstanceLocation = firstInstance;
3932
prim.BaseVertexLocation = 0;
3933
}
3934
3935
update_dirty_vbs_for_gfx8_vb_flush(cmd_buffer, SEQUENTIAL);
3936
}
3937
3938
void genX(CmdDrawMultiEXT)(
3939
VkCommandBuffer commandBuffer,
3940
uint32_t drawCount,
3941
const VkMultiDrawInfoEXT *pVertexInfo,
3942
uint32_t instanceCount,
3943
uint32_t firstInstance,
3944
uint32_t stride)
3945
{
3946
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
3947
struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
3948
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
3949
3950
if (anv_batch_has_error(&cmd_buffer->batch))
3951
return;
3952
3953
const uint32_t count = (drawCount *
3954
instanceCount *
3955
(pipeline->use_primitive_replication ?
3956
1 : anv_subpass_view_count(cmd_buffer->state.subpass)));
3957
anv_measure_snapshot(cmd_buffer,
3958
INTEL_SNAPSHOT_DRAW,
3959
"draw_multi", count);
3960
3961
genX(cmd_buffer_flush_state)(cmd_buffer);
3962
3963
if (cmd_buffer->state.conditional_render_enabled)
3964
genX(cmd_emit_conditional_render_predicate)(cmd_buffer);
3965
3966
/* Our implementation of VK_KHR_multiview uses instancing to draw the
3967
* different views. We need to multiply instanceCount by the view count.
3968
*/
3969
if (!pipeline->use_primitive_replication)
3970
instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
3971
3972
uint32_t i = 0;
3973
vk_foreach_multi_draw(draw, i, pVertexInfo, drawCount, stride) {
3974
cmd_buffer_emit_vertex_constants_and_flush(cmd_buffer, vs_prog_data,
3975
draw->firstVertex,
3976
firstInstance, i, !i);
3977
3978
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
3979
prim.PredicateEnable = cmd_buffer->state.conditional_render_enabled;
3980
prim.VertexAccessType = SEQUENTIAL;
3981
prim.PrimitiveTopologyType = cmd_buffer->state.gfx.primitive_topology;
3982
prim.VertexCountPerInstance = draw->vertexCount;
3983
prim.StartVertexLocation = draw->firstVertex;
3984
prim.InstanceCount = instanceCount;
3985
prim.StartInstanceLocation = firstInstance;
3986
prim.BaseVertexLocation = 0;
3987
}
3988
}
3989
3990
update_dirty_vbs_for_gfx8_vb_flush(cmd_buffer, SEQUENTIAL);
3991
}
3992
3993
void genX(CmdDrawIndexed)(
3994
VkCommandBuffer commandBuffer,
3995
uint32_t indexCount,
3996
uint32_t instanceCount,
3997
uint32_t firstIndex,
3998
int32_t vertexOffset,
3999
uint32_t firstInstance)
4000
{
4001
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4002
struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
4003
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
4004
4005
if (anv_batch_has_error(&cmd_buffer->batch))
4006
return;
4007
4008
const uint32_t count = (indexCount *
4009
instanceCount *
4010
(pipeline->use_primitive_replication ?
4011
1 : anv_subpass_view_count(cmd_buffer->state.subpass)));
4012
anv_measure_snapshot(cmd_buffer,
4013
INTEL_SNAPSHOT_DRAW,
4014
"draw indexed",
4015
count);
4016
4017
genX(cmd_buffer_flush_state)(cmd_buffer);
4018
4019
if (cmd_buffer->state.conditional_render_enabled)
4020
genX(cmd_emit_conditional_render_predicate)(cmd_buffer);
4021
4022
cmd_buffer_emit_vertex_constants_and_flush(cmd_buffer, vs_prog_data, vertexOffset, firstInstance, 0, true);
4023
4024
/* Our implementation of VK_KHR_multiview uses instancing to draw the
4025
* different views. We need to multiply instanceCount by the view count.
4026
*/
4027
if (!pipeline->use_primitive_replication)
4028
instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
4029
4030
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
4031
prim.PredicateEnable = cmd_buffer->state.conditional_render_enabled;
4032
prim.VertexAccessType = RANDOM;
4033
prim.PrimitiveTopologyType = cmd_buffer->state.gfx.primitive_topology;
4034
prim.VertexCountPerInstance = indexCount;
4035
prim.StartVertexLocation = firstIndex;
4036
prim.InstanceCount = instanceCount;
4037
prim.StartInstanceLocation = firstInstance;
4038
prim.BaseVertexLocation = vertexOffset;
4039
}
4040
4041
update_dirty_vbs_for_gfx8_vb_flush(cmd_buffer, RANDOM);
4042
}
4043
4044
void genX(CmdDrawMultiIndexedEXT)(
4045
VkCommandBuffer commandBuffer,
4046
uint32_t drawCount,
4047
const VkMultiDrawIndexedInfoEXT *pIndexInfo,
4048
uint32_t instanceCount,
4049
uint32_t firstInstance,
4050
uint32_t stride,
4051
const int32_t *pVertexOffset)
4052
{
4053
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4054
struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
4055
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
4056
4057
if (anv_batch_has_error(&cmd_buffer->batch))
4058
return;
4059
4060
const uint32_t count = (drawCount *
4061
instanceCount *
4062
(pipeline->use_primitive_replication ?
4063
1 : anv_subpass_view_count(cmd_buffer->state.subpass)));
4064
anv_measure_snapshot(cmd_buffer,
4065
INTEL_SNAPSHOT_DRAW,
4066
"draw indexed_multi",
4067
count);
4068
4069
genX(cmd_buffer_flush_state)(cmd_buffer);
4070
4071
if (cmd_buffer->state.conditional_render_enabled)
4072
genX(cmd_emit_conditional_render_predicate)(cmd_buffer);
4073
4074
/* Our implementation of VK_KHR_multiview uses instancing to draw the
4075
* different views. We need to multiply instanceCount by the view count.
4076
*/
4077
if (!pipeline->use_primitive_replication)
4078
instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
4079
4080
uint32_t i = 0;
4081
if (pVertexOffset) {
4082
if (vs_prog_data->uses_drawid) {
4083
bool emitted = true;
4084
if (vs_prog_data->uses_firstvertex ||
4085
vs_prog_data->uses_baseinstance) {
4086
emit_base_vertex_instance(cmd_buffer, *pVertexOffset, firstInstance);
4087
emitted = true;
4088
}
4089
vk_foreach_multi_draw_indexed(draw, i, pIndexInfo, drawCount, stride) {
4090
if (vs_prog_data->uses_drawid) {
4091
emit_draw_index(cmd_buffer, i);
4092
emitted = true;
4093
}
4094
/* Emitting draw index or vertex index BOs may result in needing
4095
* additional VF cache flushes.
4096
*/
4097
if (emitted)
4098
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4099
4100
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
4101
prim.PredicateEnable = cmd_buffer->state.conditional_render_enabled;
4102
prim.VertexAccessType = RANDOM;
4103
prim.PrimitiveTopologyType = cmd_buffer->state.gfx.primitive_topology;
4104
prim.VertexCountPerInstance = draw->indexCount;
4105
prim.StartVertexLocation = draw->firstIndex;
4106
prim.InstanceCount = instanceCount;
4107
prim.StartInstanceLocation = firstInstance;
4108
prim.BaseVertexLocation = *pVertexOffset;
4109
}
4110
emitted = false;
4111
}
4112
} else {
4113
if (vs_prog_data->uses_firstvertex ||
4114
vs_prog_data->uses_baseinstance) {
4115
emit_base_vertex_instance(cmd_buffer, *pVertexOffset, firstInstance);
4116
/* Emitting draw index or vertex index BOs may result in needing
4117
* additional VF cache flushes.
4118
*/
4119
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4120
}
4121
vk_foreach_multi_draw_indexed(draw, i, pIndexInfo, drawCount, stride) {
4122
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
4123
prim.PredicateEnable = cmd_buffer->state.conditional_render_enabled;
4124
prim.VertexAccessType = RANDOM;
4125
prim.PrimitiveTopologyType = cmd_buffer->state.gfx.primitive_topology;
4126
prim.VertexCountPerInstance = draw->indexCount;
4127
prim.StartVertexLocation = draw->firstIndex;
4128
prim.InstanceCount = instanceCount;
4129
prim.StartInstanceLocation = firstInstance;
4130
prim.BaseVertexLocation = *pVertexOffset;
4131
}
4132
}
4133
}
4134
} else {
4135
vk_foreach_multi_draw_indexed(draw, i, pIndexInfo, drawCount, stride) {
4136
cmd_buffer_emit_vertex_constants_and_flush(cmd_buffer, vs_prog_data,
4137
draw->vertexOffset,
4138
firstInstance, i, i != 0);
4139
4140
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
4141
prim.PredicateEnable = cmd_buffer->state.conditional_render_enabled;
4142
prim.VertexAccessType = RANDOM;
4143
prim.PrimitiveTopologyType = cmd_buffer->state.gfx.primitive_topology;
4144
prim.VertexCountPerInstance = draw->indexCount;
4145
prim.StartVertexLocation = draw->firstIndex;
4146
prim.InstanceCount = instanceCount;
4147
prim.StartInstanceLocation = firstInstance;
4148
prim.BaseVertexLocation = draw->vertexOffset;
4149
}
4150
}
4151
}
4152
4153
update_dirty_vbs_for_gfx8_vb_flush(cmd_buffer, RANDOM);
4154
}
4155
4156
/* Auto-Draw / Indirect Registers */
#define GFX7_3DPRIM_END_OFFSET 0x2420
#define GFX7_3DPRIM_START_VERTEX 0x2430
#define GFX7_3DPRIM_VERTEX_COUNT 0x2434
#define GFX7_3DPRIM_INSTANCE_COUNT 0x2438
#define GFX7_3DPRIM_START_INSTANCE 0x243C
#define GFX7_3DPRIM_BASE_VERTEX 0x2440
4163
4164
void genX(CmdDrawIndirectByteCountEXT)(
4165
VkCommandBuffer commandBuffer,
4166
uint32_t instanceCount,
4167
uint32_t firstInstance,
4168
VkBuffer counterBuffer,
4169
VkDeviceSize counterBufferOffset,
4170
uint32_t counterOffset,
4171
uint32_t vertexStride)
4172
{
4173
#if GFX_VERx10 >= 75
4174
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4175
ANV_FROM_HANDLE(anv_buffer, counter_buffer, counterBuffer);
4176
struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
4177
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
4178
4179
/* firstVertex is always zero for this draw function */
4180
const uint32_t firstVertex = 0;
4181
4182
if (anv_batch_has_error(&cmd_buffer->batch))
4183
return;
4184
4185
anv_measure_snapshot(cmd_buffer,
4186
INTEL_SNAPSHOT_DRAW,
4187
"draw indirect byte count",
4188
instanceCount);
4189
4190
genX(cmd_buffer_flush_state)(cmd_buffer);
4191
4192
if (vs_prog_data->uses_firstvertex ||
4193
vs_prog_data->uses_baseinstance)
4194
emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
4195
if (vs_prog_data->uses_drawid)
4196
emit_draw_index(cmd_buffer, 0);
4197
4198
/* Emitting draw index or vertex index BOs may result in needing
4199
* additional VF cache flushes.
4200
*/
4201
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4202
4203
/* Our implementation of VK_KHR_multiview uses instancing to draw the
4204
* different views. We need to multiply instanceCount by the view count.
4205
*/
4206
if (!pipeline->use_primitive_replication)
4207
instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
4208
4209
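/* The vertex count is computed on the command streamer itself as
 * (counter_value - counterOffset) / vertexStride, so no CPU readback of
 * the transform feedback counter is needed.
 */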
struct mi_builder b;
4210
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
4211
struct mi_value count =
4212
mi_mem32(anv_address_add(counter_buffer->address,
4213
counterBufferOffset));
4214
if (counterOffset)
4215
count = mi_isub(&b, count, mi_imm(counterOffset));
4216
count = mi_udiv32_imm(&b, count, vertexStride);
4217
mi_store(&b, mi_reg32(GFX7_3DPRIM_VERTEX_COUNT), count);
4218
4219
mi_store(&b, mi_reg32(GFX7_3DPRIM_START_VERTEX), mi_imm(firstVertex));
4220
mi_store(&b, mi_reg32(GFX7_3DPRIM_INSTANCE_COUNT), mi_imm(instanceCount));
4221
mi_store(&b, mi_reg32(GFX7_3DPRIM_START_INSTANCE), mi_imm(firstInstance));
4222
mi_store(&b, mi_reg32(GFX7_3DPRIM_BASE_VERTEX), mi_imm(0));
4223
4224
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
4225
prim.IndirectParameterEnable = true;
4226
prim.VertexAccessType = SEQUENTIAL;
4227
prim.PrimitiveTopologyType = cmd_buffer->state.gfx.primitive_topology;
4228
}
4229
4230
update_dirty_vbs_for_gfx8_vb_flush(cmd_buffer, SEQUENTIAL);
4231
#endif /* GFX_VERx10 >= 75 */
4232
}
4233
4234
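/* The indirect parameters consumed below follow the standard Vulkan
 * layouts (defined by the Vulkan headers, reproduced here for reference):
 *
 *    VkDrawIndirectCommand              VkDrawIndexedIndirectCommand
 *     0: uint32_t vertexCount            0: uint32_t indexCount
 *     4: uint32_t instanceCount          4: uint32_t instanceCount
 *     8: uint32_t firstVertex            8: uint32_t firstIndex
 *    12: uint32_t firstInstance         12: int32_t  vertexOffset
 *                                       16: uint32_t firstInstance
 *
 * which is why the indexed path reads the base vertex at offset 12 and the
 * start instance at offset 16, while the non-indexed path reads the start
 * instance at offset 12.
 */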
static void
4235
load_indirect_parameters(struct anv_cmd_buffer *cmd_buffer,
4236
struct anv_address addr,
4237
bool indexed)
4238
{
4239
struct mi_builder b;
4240
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
4241
4242
mi_store(&b, mi_reg32(GFX7_3DPRIM_VERTEX_COUNT),
4243
mi_mem32(anv_address_add(addr, 0)));
4244
4245
struct mi_value instance_count = mi_mem32(anv_address_add(addr, 4));
4246
unsigned view_count = anv_subpass_view_count(cmd_buffer->state.subpass);
4247
if (view_count > 1) {
4248
#if GFX_VERx10 >= 75
4249
instance_count = mi_imul_imm(&b, instance_count, view_count);
4250
#else
4251
anv_finishme("Multiview + indirect draw requires MI_MATH; "
4252
"MI_MATH is not supported on Ivy Bridge");
4253
#endif
4254
}
4255
mi_store(&b, mi_reg32(GFX7_3DPRIM_INSTANCE_COUNT), instance_count);
4256
4257
mi_store(&b, mi_reg32(GFX7_3DPRIM_START_VERTEX),
4258
mi_mem32(anv_address_add(addr, 8)));
4259
4260
if (indexed) {
4261
mi_store(&b, mi_reg32(GFX7_3DPRIM_BASE_VERTEX),
4262
mi_mem32(anv_address_add(addr, 12)));
4263
mi_store(&b, mi_reg32(GFX7_3DPRIM_START_INSTANCE),
4264
mi_mem32(anv_address_add(addr, 16)));
4265
} else {
4266
mi_store(&b, mi_reg32(GFX7_3DPRIM_START_INSTANCE),
4267
mi_mem32(anv_address_add(addr, 12)));
4268
mi_store(&b, mi_reg32(GFX7_3DPRIM_BASE_VERTEX), mi_imm(0));
4269
}
4270
}
4271
4272
void genX(CmdDrawIndirect)(
4273
VkCommandBuffer commandBuffer,
4274
VkBuffer _buffer,
4275
VkDeviceSize offset,
4276
uint32_t drawCount,
4277
uint32_t stride)
4278
{
4279
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4280
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
4281
struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
4282
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
4283
4284
if (anv_batch_has_error(&cmd_buffer->batch))
4285
return;
4286
4287
genX(cmd_buffer_flush_state)(cmd_buffer);
4288
4289
if (cmd_buffer->state.conditional_render_enabled)
4290
genX(cmd_emit_conditional_render_predicate)(cmd_buffer);
4291
4292
for (uint32_t i = 0; i < drawCount; i++) {
4293
struct anv_address draw = anv_address_add(buffer->address, offset);
4294
4295
if (vs_prog_data->uses_firstvertex ||
4296
vs_prog_data->uses_baseinstance)
4297
emit_base_vertex_instance_bo(cmd_buffer, anv_address_add(draw, 8));
4298
if (vs_prog_data->uses_drawid)
4299
emit_draw_index(cmd_buffer, i);
4300
4301
/* Emitting draw index or vertex index BOs may result in needing
4302
* additional VF cache flushes.
4303
*/
4304
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4305
4306
load_indirect_parameters(cmd_buffer, draw, false);
4307
4308
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
4309
prim.IndirectParameterEnable = true;
4310
prim.PredicateEnable = cmd_buffer->state.conditional_render_enabled;
4311
prim.VertexAccessType = SEQUENTIAL;
4312
prim.PrimitiveTopologyType = cmd_buffer->state.gfx.primitive_topology;
4313
}
4314
4315
update_dirty_vbs_for_gfx8_vb_flush(cmd_buffer, SEQUENTIAL);
4316
4317
offset += stride;
4318
}
4319
}
4320
4321
void genX(CmdDrawIndexedIndirect)(
4322
VkCommandBuffer commandBuffer,
4323
VkBuffer _buffer,
4324
VkDeviceSize offset,
4325
uint32_t drawCount,
4326
uint32_t stride)
4327
{
4328
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4329
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
4330
struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
4331
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
4332
4333
if (anv_batch_has_error(&cmd_buffer->batch))
4334
return;
4335
4336
genX(cmd_buffer_flush_state)(cmd_buffer);
4337
4338
if (cmd_buffer->state.conditional_render_enabled)
4339
genX(cmd_emit_conditional_render_predicate)(cmd_buffer);
4340
4341
for (uint32_t i = 0; i < drawCount; i++) {
4342
struct anv_address draw = anv_address_add(buffer->address, offset);
4343
4344
/* TODO: We need to stomp base vertex to 0 somehow */
4345
if (vs_prog_data->uses_firstvertex ||
4346
vs_prog_data->uses_baseinstance)
4347
emit_base_vertex_instance_bo(cmd_buffer, anv_address_add(draw, 12));
4348
if (vs_prog_data->uses_drawid)
4349
emit_draw_index(cmd_buffer, i);
4350
4351
/* Emitting draw index or vertex index BOs may result in needing
4352
* additional VF cache flushes.
4353
*/
4354
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4355
4356
load_indirect_parameters(cmd_buffer, draw, true);
4357
4358
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
4359
prim.IndirectParameterEnable = true;
4360
prim.PredicateEnable = cmd_buffer->state.conditional_render_enabled;
4361
prim.VertexAccessType = RANDOM;
4362
prim.PrimitiveTopologyType = cmd_buffer->state.gfx.primitive_topology;
4363
}
4364
4365
update_dirty_vbs_for_gfx8_vb_flush(cmd_buffer, RANDOM);
4366
4367
offset += stride;
4368
}
4369
}
4370
4371
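/* Draw-count predication: the actual draw count is read from the count
 * buffer into MI_PREDICATE_SRC0 (or into a GPR when conditional rendering
 * is active, so it can be combined with the conditional-render result) and
 * each of the maxDrawCount 3DPRIMITIVEs is then predicated on its index
 * being below that count.
 */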
static struct mi_value
4372
prepare_for_draw_count_predicate(struct anv_cmd_buffer *cmd_buffer,
4373
struct mi_builder *b,
4374
struct anv_address count_address,
4375
const bool conditional_render_enabled)
4376
{
4377
struct mi_value ret = mi_imm(0);
4378
4379
if (conditional_render_enabled) {
4380
#if GFX_VERx10 >= 75
4381
ret = mi_new_gpr(b);
4382
mi_store(b, mi_value_ref(b, ret), mi_mem32(count_address));
4383
#endif
4384
} else {
4385
/* Upload the current draw count from the draw parameters buffer to
4386
* MI_PREDICATE_SRC0.
4387
*/
4388
mi_store(b, mi_reg64(MI_PREDICATE_SRC0), mi_mem32(count_address));
4389
mi_store(b, mi_reg32(MI_PREDICATE_SRC1 + 4), mi_imm(0));
4390
}
4391
4392
return ret;
4393
}
4394
4395
static void
4396
emit_draw_count_predicate(struct anv_cmd_buffer *cmd_buffer,
4397
struct mi_builder *b,
4398
uint32_t draw_index)
4399
{
4400
/* Upload the index of the current primitive to MI_PREDICATE_SRC1. */
4401
mi_store(b, mi_reg32(MI_PREDICATE_SRC1), mi_imm(draw_index));
4402
4403
if (draw_index == 0) {
4404
anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
4405
mip.LoadOperation = LOAD_LOADINV;
4406
mip.CombineOperation = COMBINE_SET;
4407
mip.CompareOperation = COMPARE_SRCS_EQUAL;
4408
}
4409
} else {
4410
/* While draw_index < draw_count the predicate's result will be
4411
* (draw_index == draw_count) ^ TRUE = TRUE
4412
* When draw_index == draw_count the result is
4413
* (TRUE) ^ TRUE = FALSE
4414
* After this all results will be:
4415
* (FALSE) ^ FALSE = FALSE
4416
*/
4417
anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
4418
mip.LoadOperation = LOAD_LOAD;
4419
mip.CombineOperation = COMBINE_XOR;
4420
mip.CompareOperation = COMPARE_SRCS_EQUAL;
4421
}
4422
}
4423
}
4424
4425
#if GFX_VERx10 >= 75
4426
static void
4427
emit_draw_count_predicate_with_conditional_render(
4428
struct anv_cmd_buffer *cmd_buffer,
4429
struct mi_builder *b,
4430
uint32_t draw_index,
4431
struct mi_value max)
4432
{
4433
struct mi_value pred = mi_ult(b, mi_imm(draw_index), max);
4434
pred = mi_iand(b, pred, mi_reg64(ANV_PREDICATE_RESULT_REG));
4435
4436
#if GFX_VER >= 8
4437
mi_store(b, mi_reg32(MI_PREDICATE_RESULT), pred);
4438
#else
4439
/* MI_PREDICATE_RESULT is not whitelisted in i915 command parser
4440
* so we emit MI_PREDICATE to set it.
4441
*/
4442
4443
mi_store(b, mi_reg64(MI_PREDICATE_SRC0), pred);
4444
mi_store(b, mi_reg64(MI_PREDICATE_SRC1), mi_imm(0));
4445
4446
anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
4447
mip.LoadOperation = LOAD_LOADINV;
4448
mip.CombineOperation = COMBINE_SET;
4449
mip.CompareOperation = COMPARE_SRCS_EQUAL;
4450
}
4451
#endif
4452
}
4453
#endif
4454
4455
void genX(CmdDrawIndirectCount)(
4456
VkCommandBuffer commandBuffer,
4457
VkBuffer _buffer,
4458
VkDeviceSize offset,
4459
VkBuffer _countBuffer,
4460
VkDeviceSize countBufferOffset,
4461
uint32_t maxDrawCount,
4462
uint32_t stride)
4463
{
4464
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4465
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
4466
ANV_FROM_HANDLE(anv_buffer, count_buffer, _countBuffer);
4467
struct anv_cmd_state *cmd_state = &cmd_buffer->state;
4468
struct anv_graphics_pipeline *pipeline = cmd_state->gfx.pipeline;
4469
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
4470
4471
if (anv_batch_has_error(&cmd_buffer->batch))
4472
return;
4473
4474
genX(cmd_buffer_flush_state)(cmd_buffer);
4475
4476
struct mi_builder b;
4477
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
4478
struct anv_address count_address =
4479
anv_address_add(count_buffer->address, countBufferOffset);
4480
struct mi_value max =
4481
prepare_for_draw_count_predicate(cmd_buffer, &b, count_address,
4482
cmd_state->conditional_render_enabled);
4483
4484
for (uint32_t i = 0; i < maxDrawCount; i++) {
4485
struct anv_address draw = anv_address_add(buffer->address, offset);
4486
4487
#if GFX_VERx10 >= 75
4488
if (cmd_state->conditional_render_enabled) {
4489
emit_draw_count_predicate_with_conditional_render(
4490
cmd_buffer, &b, i, mi_value_ref(&b, max));
4491
} else {
4492
emit_draw_count_predicate(cmd_buffer, &b, i);
4493
}
4494
#else
4495
emit_draw_count_predicate(cmd_buffer, &b, i);
4496
#endif
4497
4498
if (vs_prog_data->uses_firstvertex ||
4499
vs_prog_data->uses_baseinstance)
4500
emit_base_vertex_instance_bo(cmd_buffer, anv_address_add(draw, 8));
4501
if (vs_prog_data->uses_drawid)
4502
emit_draw_index(cmd_buffer, i);
4503
4504
/* Emitting draw index or vertex index BOs may result in needing
4505
* additional VF cache flushes.
4506
*/
4507
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4508
4509
load_indirect_parameters(cmd_buffer, draw, false);
4510
4511
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
4512
prim.IndirectParameterEnable = true;
4513
prim.PredicateEnable = true;
4514
prim.VertexAccessType = SEQUENTIAL;
4515
prim.PrimitiveTopologyType = cmd_buffer->state.gfx.primitive_topology;
4516
}
4517
4518
update_dirty_vbs_for_gfx8_vb_flush(cmd_buffer, SEQUENTIAL);
4519
4520
offset += stride;
4521
}
4522
4523
mi_value_unref(&b, max);
4524
}
4525
4526
void genX(CmdDrawIndexedIndirectCount)(
4527
VkCommandBuffer commandBuffer,
4528
VkBuffer _buffer,
4529
VkDeviceSize offset,
4530
VkBuffer _countBuffer,
4531
VkDeviceSize countBufferOffset,
4532
uint32_t maxDrawCount,
4533
uint32_t stride)
4534
{
4535
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4536
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
4537
ANV_FROM_HANDLE(anv_buffer, count_buffer, _countBuffer);
4538
struct anv_cmd_state *cmd_state = &cmd_buffer->state;
4539
struct anv_graphics_pipeline *pipeline = cmd_state->gfx.pipeline;
4540
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
4541
4542
if (anv_batch_has_error(&cmd_buffer->batch))
4543
return;
4544
4545
genX(cmd_buffer_flush_state)(cmd_buffer);
4546
4547
struct mi_builder b;
4548
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
4549
struct anv_address count_address =
4550
anv_address_add(count_buffer->address, countBufferOffset);
4551
struct mi_value max =
4552
prepare_for_draw_count_predicate(cmd_buffer, &b, count_address,
4553
cmd_state->conditional_render_enabled);
4554
4555
for (uint32_t i = 0; i < maxDrawCount; i++) {
4556
struct anv_address draw = anv_address_add(buffer->address, offset);
4557
4558
#if GFX_VERx10 >= 75
4559
if (cmd_state->conditional_render_enabled) {
4560
emit_draw_count_predicate_with_conditional_render(
4561
cmd_buffer, &b, i, mi_value_ref(&b, max));
4562
} else {
4563
emit_draw_count_predicate(cmd_buffer, &b, i);
4564
}
4565
#else
4566
emit_draw_count_predicate(cmd_buffer, &b, i);
4567
#endif
4568
4569
/* TODO: We need to stomp base vertex to 0 somehow */
4570
if (vs_prog_data->uses_firstvertex ||
4571
vs_prog_data->uses_baseinstance)
4572
emit_base_vertex_instance_bo(cmd_buffer, anv_address_add(draw, 12));
4573
if (vs_prog_data->uses_drawid)
4574
emit_draw_index(cmd_buffer, i);
4575
4576
/* Emitting draw index or vertex index BOs may result in needing
4577
* additional VF cache flushes.
4578
*/
4579
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4580
4581
load_indirect_parameters(cmd_buffer, draw, true);
4582
4583
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
4584
prim.IndirectParameterEnable = true;
4585
prim.PredicateEnable = true;
4586
prim.VertexAccessType = RANDOM;
4587
prim.PrimitiveTopologyType = cmd_buffer->state.gfx.primitive_topology;
4588
}
4589
4590
update_dirty_vbs_for_gfx8_vb_flush(cmd_buffer, RANDOM);
4591
4592
offset += stride;
4593
}
4594
4595
mi_value_unref(&b, max);
4596
}
4597
4598
void genX(CmdBeginTransformFeedbackEXT)(
4599
VkCommandBuffer commandBuffer,
4600
uint32_t firstCounterBuffer,
4601
uint32_t counterBufferCount,
4602
const VkBuffer* pCounterBuffers,
4603
const VkDeviceSize* pCounterBufferOffsets)
4604
{
4605
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4606
4607
assert(firstCounterBuffer < MAX_XFB_BUFFERS);
4608
assert(counterBufferCount <= MAX_XFB_BUFFERS);
4609
assert(firstCounterBuffer + counterBufferCount <= MAX_XFB_BUFFERS);
4610
4611
/* From the SKL PRM Vol. 2c, SO_WRITE_OFFSET:
4612
*
4613
* "Ssoftware must ensure that no HW stream output operations can be in
4614
* process or otherwise pending at the point that the MI_LOAD/STORE
4615
* commands are processed. This will likely require a pipeline flush."
4616
*/
4617
anv_add_pending_pipe_bits(cmd_buffer,
4618
ANV_PIPE_CS_STALL_BIT,
4619
"begin transform feedback");
4620
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4621
4622
for (uint32_t idx = 0; idx < MAX_XFB_BUFFERS; idx++) {
4623
/* If we have a counter buffer, this is a resume so we need to load the
4624
* value into the streamout offset register. Otherwise, this is a begin
4625
* and we need to reset it to zero.
4626
*/
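/* The four SO_WRITE_OFFSET registers are laid out as consecutive 32-bit
* MMIO registers, so the register for XFB buffer idx is
* SO_WRITE_OFFSET0 + idx * 4 bytes, as computed in the
* RegisterAddress/RegisterOffset fields below.
*/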
4627
if (pCounterBuffers &&
4628
idx >= firstCounterBuffer &&
4629
idx - firstCounterBuffer < counterBufferCount &&
4630
pCounterBuffers[idx - firstCounterBuffer] != VK_NULL_HANDLE) {
4631
uint32_t cb_idx = idx - firstCounterBuffer;
4632
ANV_FROM_HANDLE(anv_buffer, counter_buffer, pCounterBuffers[cb_idx]);
4633
uint64_t offset = pCounterBufferOffsets ?
4634
pCounterBufferOffsets[cb_idx] : 0;
4635
4636
anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
4637
lrm.RegisterAddress = GENX(SO_WRITE_OFFSET0_num) + idx * 4;
4638
lrm.MemoryAddress = anv_address_add(counter_buffer->address,
4639
offset);
4640
}
4641
} else {
4642
anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
4643
lri.RegisterOffset = GENX(SO_WRITE_OFFSET0_num) + idx * 4;
4644
lri.DataDWord = 0;
4645
}
4646
}
4647
}
4648
4649
cmd_buffer->state.xfb_enabled = true;
4650
cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_XFB_ENABLE;
4651
}
4652
4653
void genX(CmdEndTransformFeedbackEXT)(
4654
VkCommandBuffer commandBuffer,
4655
uint32_t firstCounterBuffer,
4656
uint32_t counterBufferCount,
4657
const VkBuffer* pCounterBuffers,
4658
const VkDeviceSize* pCounterBufferOffsets)
4659
{
4660
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4661
4662
assert(firstCounterBuffer < MAX_XFB_BUFFERS);
4663
assert(counterBufferCount <= MAX_XFB_BUFFERS);
4664
assert(firstCounterBuffer + counterBufferCount <= MAX_XFB_BUFFERS);
4665
4666
/* From the SKL PRM Vol. 2c, SO_WRITE_OFFSET:
4667
*
4668
* "Ssoftware must ensure that no HW stream output operations can be in
4669
* process or otherwise pending at the point that the MI_LOAD/STORE
4670
* commands are processed. This will likely require a pipeline flush."
4671
*/
4672
anv_add_pending_pipe_bits(cmd_buffer,
4673
ANV_PIPE_CS_STALL_BIT,
4674
"end transform feedback");
4675
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4676
4677
for (uint32_t cb_idx = 0; cb_idx < counterBufferCount; cb_idx++) {
4678
unsigned idx = firstCounterBuffer + cb_idx;
4679
4680
/* If we have a counter buffer, this is a pause, so we store the current
* streamout write offset into the counter buffer so that a later resume
* can reload it. Otherwise the value is simply discarded.
4683
*/
4684
if (pCounterBuffers &&
4685
cb_idx < counterBufferCount &&
4686
pCounterBuffers[cb_idx] != VK_NULL_HANDLE) {
4687
ANV_FROM_HANDLE(anv_buffer, counter_buffer, pCounterBuffers[cb_idx]);
4688
uint64_t offset = pCounterBufferOffsets ?
4689
pCounterBufferOffsets[cb_idx] : 0;
4690
4691
anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
4692
srm.MemoryAddress = anv_address_add(counter_buffer->address,
4693
offset);
4694
srm.RegisterAddress = GENX(SO_WRITE_OFFSET0_num) + idx * 4;
4695
}
4696
}
4697
}
4698
4699
cmd_buffer->state.xfb_enabled = false;
4700
cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_XFB_ENABLE;
4701
}
4702
4703
void
4704
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
4705
{
4706
struct anv_cmd_compute_state *comp_state = &cmd_buffer->state.compute;
4707
struct anv_compute_pipeline *pipeline = comp_state->pipeline;
4708
4709
assert(pipeline->cs);
4710
4711
genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->base.l3_config);
4712
4713
genX(flush_pipeline_select_gpgpu)(cmd_buffer);
4714
4715
/* Apply any pending pipeline flushes we may have. We want to apply them
4716
* now because, if any of those flushes are for things like push constants,
4717
* the GPU will read the state at weird times.
4718
*/
4719
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4720
4721
if (cmd_buffer->state.compute.pipeline_dirty) {
4722
/* From the Sky Lake PRM Vol 2a, MEDIA_VFE_STATE:
4723
*
4724
* "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
4725
* the only bits that are changed are scoreboard related: Scoreboard
4726
* Enable, Scoreboard Type, Scoreboard Mask, Scoreboard Delta. For
4727
* these scoreboard related states, a MEDIA_STATE_FLUSH is
4728
* sufficient."
4729
*/
4730
anv_add_pending_pipe_bits(cmd_buffer,
4731
ANV_PIPE_CS_STALL_BIT,
4732
"flush compute state");
4733
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4734
4735
anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->base.batch);
4736
4737
/* The workgroup size of the pipeline affects our push constant layout
4738
* so flag push constants as dirty if we change the pipeline.
4739
*/
4740
cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
4741
}
4742
4743
if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
4744
cmd_buffer->state.compute.pipeline_dirty) {
4745
flush_descriptor_sets(cmd_buffer,
4746
&cmd_buffer->state.compute.base,
4747
VK_SHADER_STAGE_COMPUTE_BIT,
4748
&pipeline->cs, 1);
4749
cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
4750
4751
#if GFX_VERx10 < 125
4752
uint32_t iface_desc_data_dw[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
4753
struct GENX(INTERFACE_DESCRIPTOR_DATA) desc = {
4754
.BindingTablePointer =
4755
cmd_buffer->state.binding_tables[MESA_SHADER_COMPUTE].offset,
4756
.SamplerStatePointer =
4757
cmd_buffer->state.samplers[MESA_SHADER_COMPUTE].offset,
4758
};
4759
GENX(INTERFACE_DESCRIPTOR_DATA_pack)(NULL, iface_desc_data_dw, &desc);
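/* Only the binding table and sampler pointers above come from the command
* buffer; anv_cmd_buffer_merge_dynamic combines this partial pack with the
* pipeline's precomputed interface_descriptor_data into a single
* dynamic-state allocation that MEDIA_INTERFACE_DESCRIPTOR_LOAD points at.
*/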
4760
4761
struct anv_state state =
4762
anv_cmd_buffer_merge_dynamic(cmd_buffer, iface_desc_data_dw,
4763
pipeline->interface_descriptor_data,
4764
GENX(INTERFACE_DESCRIPTOR_DATA_length),
4765
64);
4766
4767
uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
4768
anv_batch_emit(&cmd_buffer->batch,
4769
GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), mid) {
4770
mid.InterfaceDescriptorTotalLength = size;
4771
mid.InterfaceDescriptorDataStartAddress = state.offset;
4772
}
4773
#endif
4774
}
4775
4776
if (cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_COMPUTE_BIT) {
4777
comp_state->push_data =
4778
anv_cmd_buffer_cs_push_constants(cmd_buffer);
4779
4780
#if GFX_VERx10 < 125
4781
if (comp_state->push_data.alloc_size) {
4782
anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
4783
curbe.CURBETotalDataLength = comp_state->push_data.alloc_size;
4784
curbe.CURBEDataStartAddress = comp_state->push_data.offset;
4785
}
4786
}
4787
#endif
4788
4789
cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
4790
}
4791
4792
cmd_buffer->state.compute.pipeline_dirty = false;
4793
4794
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4795
}
4796
4797
#if GFX_VER == 7
4798
4799
static VkResult
4800
verify_cmd_parser(const struct anv_device *device,
4801
int required_version,
4802
const char *function)
4803
{
4804
if (device->physical->cmd_parser_version < required_version) {
4805
return vk_errorf(device, &device->physical->vk.base,
4806
VK_ERROR_FEATURE_NOT_PRESENT,
4807
"cmd parser version %d is required for %s",
4808
required_version, function);
4809
} else {
4810
return VK_SUCCESS;
4811
}
4812
}
4813
4814
#endif
4815
4816
static void
4817
anv_cmd_buffer_push_base_group_id(struct anv_cmd_buffer *cmd_buffer,
4818
uint32_t baseGroupX,
4819
uint32_t baseGroupY,
4820
uint32_t baseGroupZ)
4821
{
4822
if (anv_batch_has_error(&cmd_buffer->batch))
4823
return;
4824
4825
struct anv_push_constants *push =
4826
&cmd_buffer->state.compute.base.push_constants;
4827
if (push->cs.base_work_group_id[0] != baseGroupX ||
4828
push->cs.base_work_group_id[1] != baseGroupY ||
4829
push->cs.base_work_group_id[2] != baseGroupZ) {
4830
push->cs.base_work_group_id[0] = baseGroupX;
4831
push->cs.base_work_group_id[1] = baseGroupY;
4832
push->cs.base_work_group_id[2] = baseGroupZ;
4833
4834
cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
4835
}
4836
}
4837
4838
void genX(CmdDispatch)(
4839
VkCommandBuffer commandBuffer,
4840
uint32_t x,
4841
uint32_t y,
4842
uint32_t z)
4843
{
4844
genX(CmdDispatchBase)(commandBuffer, 0, 0, 0, x, y, z);
4845
}
4846
4847
#if GFX_VERx10 >= 125
4848
4849
static inline void
4850
emit_compute_walker(struct anv_cmd_buffer *cmd_buffer,
4851
const struct anv_compute_pipeline *pipeline, bool indirect,
4852
const struct brw_cs_prog_data *prog_data,
4853
uint32_t groupCountX, uint32_t groupCountY,
4854
uint32_t groupCountZ)
4855
{
4856
struct anv_cmd_compute_state *comp_state = &cmd_buffer->state.compute;
4857
const struct anv_shader_bin *cs_bin = pipeline->cs;
4858
bool predicate = cmd_buffer->state.conditional_render_enabled;
4859
4860
const struct intel_device_info *devinfo = &pipeline->base.device->info;
4861
const struct brw_cs_dispatch_info dispatch =
4862
brw_cs_get_dispatch_info(devinfo, prog_data, NULL);
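/* dispatch.simd_size is 8, 16 or 32, so dividing by 16 yields the
* hardware SIMDSize encoding used below (SIMD8 = 0, SIMD16 = 1,
* SIMD32 = 2).
*/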
4863
4864
anv_batch_emit(&cmd_buffer->batch, GENX(COMPUTE_WALKER), cw) {
4865
cw.IndirectParameterEnable = indirect;
4866
cw.PredicateEnable = predicate;
4867
cw.SIMDSize = dispatch.simd_size / 16;
4868
cw.IndirectDataStartAddress = comp_state->push_data.offset;
4869
cw.IndirectDataLength = comp_state->push_data.alloc_size;
4870
cw.LocalXMaximum = prog_data->local_size[0] - 1;
4871
cw.LocalYMaximum = prog_data->local_size[1] - 1;
4872
cw.LocalZMaximum = prog_data->local_size[2] - 1;
4873
cw.ThreadGroupIDXDimension = groupCountX;
4874
cw.ThreadGroupIDYDimension = groupCountY;
4875
cw.ThreadGroupIDZDimension = groupCountZ;
4876
cw.ExecutionMask = dispatch.right_mask;
4877
4878
cw.InterfaceDescriptor = (struct GENX(INTERFACE_DESCRIPTOR_DATA)) {
4879
.KernelStartPointer = cs_bin->kernel.offset,
4880
.SamplerStatePointer =
4881
cmd_buffer->state.samplers[MESA_SHADER_COMPUTE].offset,
4882
.BindingTablePointer =
4883
cmd_buffer->state.binding_tables[MESA_SHADER_COMPUTE].offset,
4884
.BindingTableEntryCount =
4885
1 + MIN2(pipeline->cs->bind_map.surface_count, 30),
4886
.NumberofThreadsinGPGPUThreadGroup = dispatch.threads,
4887
.SharedLocalMemorySize = encode_slm_size(GFX_VER,
4888
prog_data->base.total_shared),
4889
.BarrierEnable = prog_data->uses_barrier,
4890
};
4891
}
4892
}
4893
4894
#else /* #if GFX_VERx10 >= 125 */
4895
4896
static inline void
4897
emit_gpgpu_walker(struct anv_cmd_buffer *cmd_buffer,
4898
const struct anv_compute_pipeline *pipeline, bool indirect,
4899
const struct brw_cs_prog_data *prog_data,
4900
uint32_t groupCountX, uint32_t groupCountY,
4901
uint32_t groupCountZ)
4902
{
4903
bool predicate = (GFX_VER <= 7 && indirect) ||
4904
cmd_buffer->state.conditional_render_enabled;
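/* On GFX_VER <= 7, indirect dispatches are always predicated because
* genX(CmdDispatchIndirect) emits an MI_PREDICATE sequence that skips the
* walker whenever any of the indirect dispatch dimensions is zero.
*/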
4905
4906
const struct intel_device_info *devinfo = &pipeline->base.device->info;
4907
const struct brw_cs_dispatch_info dispatch =
4908
brw_cs_get_dispatch_info(devinfo, prog_data, NULL);
4909
4910
anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
4911
ggw.IndirectParameterEnable = indirect;
4912
ggw.PredicateEnable = predicate;
4913
ggw.SIMDSize = dispatch.simd_size / 16;
4914
ggw.ThreadDepthCounterMaximum = 0;
4915
ggw.ThreadHeightCounterMaximum = 0;
4916
ggw.ThreadWidthCounterMaximum = dispatch.threads - 1;
4917
ggw.ThreadGroupIDXDimension = groupCountX;
4918
ggw.ThreadGroupIDYDimension = groupCountY;
4919
ggw.ThreadGroupIDZDimension = groupCountZ;
4920
ggw.RightExecutionMask = dispatch.right_mask;
4921
ggw.BottomExecutionMask = 0xffffffff;
4922
}
4923
4924
anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
4925
}
4926
4927
#endif /* #if GFX_VERx10 >= 125 */
4928
4929
static inline void
4930
emit_cs_walker(struct anv_cmd_buffer *cmd_buffer,
4931
const struct anv_compute_pipeline *pipeline, bool indirect,
4932
const struct brw_cs_prog_data *prog_data,
4933
uint32_t groupCountX, uint32_t groupCountY,
4934
uint32_t groupCountZ)
4935
{
4936
#if GFX_VERx10 >= 125
4937
emit_compute_walker(cmd_buffer, pipeline, indirect, prog_data, groupCountX,
4938
groupCountY, groupCountZ);
4939
#else
4940
emit_gpgpu_walker(cmd_buffer, pipeline, indirect, prog_data, groupCountX,
4941
groupCountY, groupCountZ);
4942
#endif
4943
}
4944
4945
void genX(CmdDispatchBase)(
4946
VkCommandBuffer commandBuffer,
4947
uint32_t baseGroupX,
4948
uint32_t baseGroupY,
4949
uint32_t baseGroupZ,
4950
uint32_t groupCountX,
4951
uint32_t groupCountY,
4952
uint32_t groupCountZ)
4953
{
4954
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4955
struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
4956
const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
4957
4958
anv_cmd_buffer_push_base_group_id(cmd_buffer, baseGroupX,
4959
baseGroupY, baseGroupZ);
4960
4961
if (anv_batch_has_error(&cmd_buffer->batch))
4962
return;
4963
4964
anv_measure_snapshot(cmd_buffer,
4965
INTEL_SNAPSHOT_COMPUTE,
4966
"compute",
4967
groupCountX * groupCountY * groupCountZ *
4968
prog_data->local_size[0] * prog_data->local_size[1] *
4969
prog_data->local_size[2]);
4970
4971
if (prog_data->uses_num_work_groups) {
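/* The shader reads gl_NumWorkGroups through a buffer surface, so stash
* the three group counts in dynamic state and point num_workgroups at
* it; the surface itself is set up via the binding table below.
*/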
4972
struct anv_state state =
4973
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
4974
uint32_t *sizes = state.map;
4975
sizes[0] = groupCountX;
4976
sizes[1] = groupCountY;
4977
sizes[2] = groupCountZ;
4978
cmd_buffer->state.compute.num_workgroups = (struct anv_address) {
4979
.bo = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
4980
.offset = state.offset,
4981
};
4982
4983
/* The num_workgroups buffer goes in the binding table */
4984
cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
4985
}
4986
4987
genX(cmd_buffer_flush_compute_state)(cmd_buffer);
4988
4989
if (cmd_buffer->state.conditional_render_enabled)
4990
genX(cmd_emit_conditional_render_predicate)(cmd_buffer);
4991
4992
emit_cs_walker(cmd_buffer, pipeline, false, prog_data, groupCountX,
4993
groupCountY, groupCountZ);
4994
}
4995
4996
#define GPGPU_DISPATCHDIMX 0x2500
4997
#define GPGPU_DISPATCHDIMY 0x2504
4998
#define GPGPU_DISPATCHDIMZ 0x2508
4999
5000
void genX(CmdDispatchIndirect)(
5001
VkCommandBuffer commandBuffer,
5002
VkBuffer _buffer,
5003
VkDeviceSize offset)
5004
{
5005
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
5006
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
5007
struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
5008
const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
5009
struct anv_address addr = anv_address_add(buffer->address, offset);
5010
UNUSED struct anv_batch *batch = &cmd_buffer->batch;
5011
5012
anv_cmd_buffer_push_base_group_id(cmd_buffer, 0, 0, 0);
5013
5014
#if GFX_VER == 7
5015
/* Linux 4.4 added command parser version 5 which allows the GPGPU
5016
* indirect dispatch registers to be written.
5017
*/
5018
if (verify_cmd_parser(cmd_buffer->device, 5,
5019
"vkCmdDispatchIndirect") != VK_SUCCESS)
5020
return;
5021
#endif
5022
5023
anv_measure_snapshot(cmd_buffer,
5024
INTEL_SNAPSHOT_COMPUTE,
5025
"compute indirect",
5026
0);
5027
5028
if (prog_data->uses_num_work_groups) {
5029
cmd_buffer->state.compute.num_workgroups = addr;
5030
5031
/* The num_workgroups buffer goes in the binding table */
5032
cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
5033
}
5034
5035
genX(cmd_buffer_flush_compute_state)(cmd_buffer);
5036
5037
struct mi_builder b;
5038
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
5039
5040
struct mi_value size_x = mi_mem32(anv_address_add(addr, 0));
5041
struct mi_value size_y = mi_mem32(anv_address_add(addr, 4));
5042
struct mi_value size_z = mi_mem32(anv_address_add(addr, 8));
5043
5044
mi_store(&b, mi_reg32(GPGPU_DISPATCHDIMX), size_x);
5045
mi_store(&b, mi_reg32(GPGPU_DISPATCHDIMY), size_y);
5046
mi_store(&b, mi_reg32(GPGPU_DISPATCHDIMZ), size_z);
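/* With IndirectParameterEnable set, the walker emitted by emit_cs_walker()
* sources its thread-group counts from the three DISPATCHDIM registers
* loaded above instead of from the command itself.
*/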
5047
5048
#if GFX_VER <= 7
5049
/* predicate = (compute_dispatch_indirect_x_size == 0); */
5050
mi_store(&b, mi_reg64(MI_PREDICATE_SRC0), size_x);
5051
mi_store(&b, mi_reg64(MI_PREDICATE_SRC1), mi_imm(0));
5052
anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
5053
mip.LoadOperation = LOAD_LOAD;
5054
mip.CombineOperation = COMBINE_SET;
5055
mip.CompareOperation = COMPARE_SRCS_EQUAL;
5056
}
5057
5058
/* predicate |= (compute_dispatch_indirect_y_size == 0); */
5059
mi_store(&b, mi_reg32(MI_PREDICATE_SRC0), size_y);
5060
anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
5061
mip.LoadOperation = LOAD_LOAD;
5062
mip.CombineOperation = COMBINE_OR;
5063
mip.CompareOperation = COMPARE_SRCS_EQUAL;
5064
}
5065
5066
/* predicate |= (compute_dispatch_indirect_z_size == 0); */
5067
mi_store(&b, mi_reg32(MI_PREDICATE_SRC0), size_z);
5068
anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
5069
mip.LoadOperation = LOAD_LOAD;
5070
mip.CombineOperation = COMBINE_OR;
5071
mip.CompareOperation = COMPARE_SRCS_EQUAL;
5072
}
5073
5074
/* predicate = !predicate; */
5075
anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
5076
mip.LoadOperation = LOAD_LOADINV;
5077
mip.CombineOperation = COMBINE_OR;
5078
mip.CompareOperation = COMPARE_FALSE;
5079
}
5080
5081
#if GFX_VERx10 == 75
5082
if (cmd_buffer->state.conditional_render_enabled) {
5083
/* predicate &= !(conditional_rendering_predicate == 0); */
5084
mi_store(&b, mi_reg32(MI_PREDICATE_SRC0),
5085
mi_reg32(ANV_PREDICATE_RESULT_REG));
5086
anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
5087
mip.LoadOperation = LOAD_LOADINV;
5088
mip.CombineOperation = COMBINE_AND;
5089
mip.CompareOperation = COMPARE_SRCS_EQUAL;
5090
}
5091
}
5092
#endif
5093
5094
#else /* GFX_VER > 7 */
5095
if (cmd_buffer->state.conditional_render_enabled)
5096
genX(cmd_emit_conditional_render_predicate)(cmd_buffer);
5097
#endif
5098
5099
emit_cs_walker(cmd_buffer, pipeline, true, prog_data, 0, 0, 0);
5100
}
5101
5102
#if GFX_VERx10 >= 125
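/* Split a total local size of 2^3 = 8 invocations per group across X/Y/Z,
* giving more to the larger launch dimensions. For example, a launch of
* { 800, 600, 1 } yields local_shift = { 2, 1, 0 }, i.e. a 4x2x1 local
* group, matching the SIMD8 COMPUTE_WALKER emitted below.
*/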
5103
static void
5104
calc_local_trace_size(uint8_t local_shift[3], const uint32_t global[3])
5105
{
5106
unsigned total_shift = 0;
5107
memset(local_shift, 0, 3);
5108
5109
bool progress;
5110
do {
5111
progress = false;
5112
for (unsigned i = 0; i < 3; i++) {
5113
assert(global[i] > 0);
5114
if ((1 << local_shift[i]) < global[i]) {
5115
progress = true;
5116
local_shift[i]++;
5117
total_shift++;
5118
}
5119
5120
if (total_shift == 3)
5121
return;
5122
}
5123
} while (progress);
5124
5125
/* Assign whatever's left to x */
5126
local_shift[0] += 3 - total_shift;
5127
}
5128
5129
static struct GFX_RT_SHADER_TABLE
5130
vk_sdar_to_shader_table(const VkStridedDeviceAddressRegionKHR *region)
5131
{
5132
return (struct GFX_RT_SHADER_TABLE) {
5133
.BaseAddress = anv_address_from_u64(region->deviceAddress),
5134
.Stride = region->stride,
5135
};
5136
}
5137
5138
static void
5139
cmd_buffer_trace_rays(struct anv_cmd_buffer *cmd_buffer,
5140
const VkStridedDeviceAddressRegionKHR *raygen_sbt,
5141
const VkStridedDeviceAddressRegionKHR *miss_sbt,
5142
const VkStridedDeviceAddressRegionKHR *hit_sbt,
5143
const VkStridedDeviceAddressRegionKHR *callable_sbt,
5144
bool is_indirect,
5145
uint32_t launch_width,
5146
uint32_t launch_height,
5147
uint32_t launch_depth,
5148
uint64_t launch_size_addr)
5149
{
5150
struct anv_cmd_ray_tracing_state *rt = &cmd_buffer->state.rt;
5151
struct anv_ray_tracing_pipeline *pipeline = rt->pipeline;
5152
5153
if (anv_batch_has_error(&cmd_buffer->batch))
5154
return;
5155
5156
/* If we have a known degenerate launch size, just bail */
5157
if (!is_indirect &&
5158
(launch_width == 0 || launch_height == 0 || launch_depth == 0))
5159
return;
5160
5161
genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->base.l3_config);
5162
genX(flush_pipeline_select_gpgpu)(cmd_buffer);
5163
5164
cmd_buffer->state.rt.pipeline_dirty = false;
5165
5166
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
5167
5168
/* Add these to the reloc list manually, as they're internal buffers that
* don't have relocs of their own to pick them up.
5170
*
5171
* TODO(RT): This is a bit of a hack
5172
*/
5173
anv_reloc_list_add_bo(cmd_buffer->batch.relocs,
5174
cmd_buffer->batch.alloc,
5175
rt->scratch.bo);
5176
5177
/* Allocate and set up our RT_DISPATCH_GLOBALS */
5178
struct anv_state rtdg_state =
5179
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
5180
BRW_RT_PUSH_CONST_OFFSET +
5181
sizeof(struct anv_push_constants),
5182
64);
5183
5184
struct GFX_RT_DISPATCH_GLOBALS rtdg = {
5185
.MemBaseAddress = (struct anv_address) {
5186
.bo = rt->scratch.bo,
5187
.offset = rt->scratch.layout.ray_stack_start,
5188
},
5189
.CallStackHandler =
5190
anv_shader_bin_get_bsr(cmd_buffer->device->rt_trivial_return, 0),
5191
.AsyncRTStackSize = rt->scratch.layout.ray_stack_stride / 64,
5192
.NumDSSRTStacks = rt->scratch.layout.stack_ids_per_dss,
5193
.MaxBVHLevels = BRW_RT_MAX_BVH_LEVELS,
5194
.Flags = RT_DEPTH_TEST_LESS_EQUAL,
5195
.HitGroupTable = vk_sdar_to_shader_table(hit_sbt),
5196
.MissGroupTable = vk_sdar_to_shader_table(miss_sbt),
5197
.SWStackSize = rt->scratch.layout.sw_stack_size / 64,
5198
.LaunchWidth = launch_width,
5199
.LaunchHeight = launch_height,
5200
.LaunchDepth = launch_depth,
5201
.CallableGroupTable = vk_sdar_to_shader_table(callable_sbt),
5202
};
5203
GFX_RT_DISPATCH_GLOBALS_pack(NULL, rtdg_state.map, &rtdg);
5204
5205
/* Push constants go after the RT_DISPATCH_GLOBALS */
5206
assert(GFX_RT_DISPATCH_GLOBALS_length * 4 <= BRW_RT_PUSH_CONST_OFFSET);
5207
memcpy(rtdg_state.map + BRW_RT_PUSH_CONST_OFFSET,
5208
&cmd_buffer->state.rt.base.push_constants,
5209
sizeof(struct anv_push_constants));
5210
5211
struct anv_address rtdg_addr = {
5212
.bo = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
5213
.offset = rtdg_state.offset,
5214
};
5215
5216
uint8_t local_size_log2[3];
5217
uint32_t global_size[3] = {};
5218
if (is_indirect) {
5219
/* Pick a local size that's probably ok. We assume most TraceRays calls
5220
* will use a two-dimensional dispatch size. Worst case, our initial
5221
* dispatch will be a little slower than it has to be.
5222
*/
5223
local_size_log2[0] = 2;
5224
local_size_log2[1] = 1;
5225
local_size_log2[2] = 0;
5226
5227
struct mi_builder b;
5228
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
5229
5230
struct mi_value launch_size[3] = {
5231
mi_mem32(anv_address_from_u64(launch_size_addr + 0)),
5232
mi_mem32(anv_address_from_u64(launch_size_addr + 4)),
5233
mi_mem32(anv_address_from_u64(launch_size_addr + 8)),
5234
};
5235
5236
/* Store the original launch size into RT_DISPATCH_GLOBALS
5237
*
5238
* TODO: Pull values from genX_bits.h once RT_DISPATCH_GLOBALS gets
5239
* moved into a genX version.
5240
*/
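/* The literal offsets 52/56/60 below are assumed to be the dword
* locations of the LaunchWidth/Height/Depth fields in the
* RT_DISPATCH_GLOBALS packed above; see the TODO about pulling them from
* genX_bits.h instead.
*/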
5241
mi_store(&b, mi_mem32(anv_address_add(rtdg_addr, 52)),
5242
mi_value_ref(&b, launch_size[0]));
5243
mi_store(&b, mi_mem32(anv_address_add(rtdg_addr, 56)),
5244
mi_value_ref(&b, launch_size[1]));
5245
mi_store(&b, mi_mem32(anv_address_add(rtdg_addr, 60)),
5246
mi_value_ref(&b, launch_size[2]));
5247
5248
/* Compute the global dispatch size */
5249
for (unsigned i = 0; i < 3; i++) {
5250
if (local_size_log2[i] == 0)
5251
continue;
5252
5253
/* global_size = DIV_ROUND_UP(launch_size, local_size)
5254
*
5255
* Fortunately for us, MI_ALU math is 64-bit and mi_ushr32_imm has the
* semantics of shifting the entire 64-bit value and taking the bottom
* 32 bits, so we don't have to worry about roll-over.
5258
*/
5259
uint32_t local_size = 1 << local_size_log2[i];
5260
launch_size[i] = mi_iadd(&b, launch_size[i],
5261
mi_imm(local_size - 1));
5262
launch_size[i] = mi_ushr32_imm(&b, launch_size[i],
5263
local_size_log2[i]);
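/* i.e. launch_size[i] = (launch_size[i] + (2^s - 1)) >> s, which for
* s = local_size_log2[i] = 2 is (launch + 3) >> 2 = DIV_ROUND_UP(launch, 4).
*/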
5264
}
5265
5266
mi_store(&b, mi_reg32(GPGPU_DISPATCHDIMX), launch_size[0]);
5267
mi_store(&b, mi_reg32(GPGPU_DISPATCHDIMY), launch_size[1]);
5268
mi_store(&b, mi_reg32(GPGPU_DISPATCHDIMZ), launch_size[2]);
5269
} else {
5270
uint32_t launch_size[3] = { launch_width, launch_height, launch_depth };
5271
calc_local_trace_size(local_size_log2, launch_size);
5272
5273
for (unsigned i = 0; i < 3; i++) {
5274
/* We have to be a bit careful here because DIV_ROUND_UP adds to the
* numerator, and that addition may overflow. Cast to uint64_t to avoid
* this.
5276
*/
5277
uint32_t local_size = 1 << local_size_log2[i];
5278
global_size[i] = DIV_ROUND_UP((uint64_t)launch_size[i], local_size);
5279
}
5280
}
5281
5282
anv_batch_emit(&cmd_buffer->batch, GENX(COMPUTE_WALKER), cw) {
5283
cw.IndirectParameterEnable = is_indirect;
5284
cw.PredicateEnable = false;
5285
cw.SIMDSize = SIMD8;
5286
cw.LocalXMaximum = (1 << local_size_log2[0]) - 1;
5287
cw.LocalYMaximum = (1 << local_size_log2[1]) - 1;
5288
cw.LocalZMaximum = (1 << local_size_log2[2]) - 1;
5289
cw.ThreadGroupIDXDimension = global_size[0];
5290
cw.ThreadGroupIDYDimension = global_size[1];
5291
cw.ThreadGroupIDZDimension = global_size[2];
5292
cw.ExecutionMask = 0xff;
5293
cw.EmitInlineParameter = true;
5294
5295
const gl_shader_stage s = MESA_SHADER_RAYGEN;
5296
struct anv_device *device = cmd_buffer->device;
5297
struct anv_state *surfaces = &cmd_buffer->state.binding_tables[s];
5298
struct anv_state *samplers = &cmd_buffer->state.samplers[s];
5299
cw.InterfaceDescriptor = (struct GENX(INTERFACE_DESCRIPTOR_DATA)) {
5300
.KernelStartPointer = device->rt_trampoline->kernel.offset,
5301
.SamplerStatePointer = samplers->offset,
5302
/* i965: DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4), */
5303
.SamplerCount = 0,
5304
.BindingTablePointer = surfaces->offset,
5305
.NumberofThreadsinGPGPUThreadGroup = 1,
5306
.BTDMode = true,
5307
};
5308
5309
struct brw_rt_raygen_trampoline_params trampoline_params = {
5310
.rt_disp_globals_addr = anv_address_physical(rtdg_addr),
5311
.raygen_bsr_addr = raygen_sbt->deviceAddress,
5312
.is_indirect = is_indirect,
5313
.local_group_size_log2 = {
5314
local_size_log2[0],
5315
local_size_log2[1],
5316
local_size_log2[2],
5317
},
5318
};
5319
STATIC_ASSERT(sizeof(trampoline_params) == 32);
5320
memcpy(cw.InlineData, &trampoline_params, sizeof(trampoline_params));
5321
}
5322
}
5323
5324
void
5325
genX(CmdTraceRaysKHR)(
5326
VkCommandBuffer commandBuffer,
5327
const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable,
5328
const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable,
5329
const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable,
5330
const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable,
5331
uint32_t width,
5332
uint32_t height,
5333
uint32_t depth)
5334
{
5335
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
5336
5337
cmd_buffer_trace_rays(cmd_buffer,
5338
pRaygenShaderBindingTable,
5339
pMissShaderBindingTable,
5340
pHitShaderBindingTable,
5341
pCallableShaderBindingTable,
5342
false /* is_indirect */,
5343
width, height, depth,
5344
0 /* launch_size_addr */);
5345
}
5346
5347
void
5348
genX(CmdTraceRaysIndirectKHR)(
5349
VkCommandBuffer commandBuffer,
5350
const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable,
5351
const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable,
5352
const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable,
5353
const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable,
5354
VkDeviceAddress indirectDeviceAddress)
5355
{
5356
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
5357
5358
cmd_buffer_trace_rays(cmd_buffer,
5359
pRaygenShaderBindingTable,
5360
pMissShaderBindingTable,
5361
pHitShaderBindingTable,
5362
pCallableShaderBindingTable,
5363
true /* is_indirect */,
5364
0, 0, 0, /* width, height, depth, */
5365
indirectDeviceAddress);
5366
}
5367
#endif /* GFX_VERx10 >= 125 */
5368
5369
static void
5370
genX(flush_pipeline_select)(struct anv_cmd_buffer *cmd_buffer,
5371
uint32_t pipeline)
5372
{
5373
UNUSED const struct intel_device_info *devinfo = &cmd_buffer->device->info;
5374
5375
if (cmd_buffer->state.current_pipeline == pipeline)
5376
return;
5377
5378
#if GFX_VER >= 8 && GFX_VER < 10
5379
/* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
5380
*
5381
* Software must clear the COLOR_CALC_STATE Valid field in
5382
* 3DSTATE_CC_STATE_POINTERS command prior to sending a PIPELINE_SELECT
5383
* with Pipeline Select set to GPGPU.
5384
*
5385
* The internal hardware docs recommend the same workaround for Gfx9
5386
* hardware too.
5387
*/
5388
if (pipeline == GPGPU)
5389
anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
5390
#endif
5391
5392
#if GFX_VER == 9
5393
if (pipeline == _3D) {
5394
/* There is a mid-object preemption workaround which requires you to
5395
* re-emit MEDIA_VFE_STATE after switching from GPGPU to 3D. However,
5396
* even without preemption, we have issues with geometry flickering when
5397
* GPGPU and 3D are back-to-back and this seems to fix it. We don't
5398
* really know why.
5399
*/
5400
const uint32_t subslices =
5401
MAX2(cmd_buffer->device->physical->subslice_total, 1);
5402
anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_VFE_STATE), vfe) {
5403
vfe.MaximumNumberofThreads =
5404
devinfo->max_cs_threads * subslices - 1;
5405
vfe.NumberofURBEntries = 2;
5406
vfe.URBEntryAllocationSize = 2;
5407
}
5408
5409
/* We just emitted a dummy MEDIA_VFE_STATE so now that packet is
5410
* invalid. Set the compute pipeline to dirty to force a re-emit of the
5411
* pipeline in case we get back-to-back dispatch calls with the same
5412
* pipeline and a PIPELINE_SELECT in between.
5413
*/
5414
cmd_buffer->state.compute.pipeline_dirty = true;
5415
}
5416
#endif
5417
5418
/* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
5419
* PIPELINE_SELECT [DevBWR+]":
5420
*
5421
* Project: DEVSNB+
5422
*
5423
* Software must ensure all the write caches are flushed through a
5424
* stalling PIPE_CONTROL command followed by another PIPE_CONTROL
5425
* command to invalidate read only caches prior to programming
5426
* MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
5427
*/
5428
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
5429
pc.RenderTargetCacheFlushEnable = true;
5430
pc.DepthCacheFlushEnable = true;
5431
#if GFX_VER >= 12
5432
pc.HDCPipelineFlushEnable = true;
5433
#else
5434
pc.DCFlushEnable = true;
5435
#endif
5436
pc.PostSyncOperation = NoWrite;
5437
pc.CommandStreamerStallEnable = true;
5438
#if GFX_VER >= 12
5439
/* Wa_1409600907: "PIPE_CONTROL with Depth Stall Enable bit must be
5440
* set with any PIPE_CONTROL with Depth Flush Enable bit set.
5441
*/
5442
pc.DepthStallEnable = true;
5443
#endif
5444
anv_debug_dump_pc(pc);
5445
}
5446
5447
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
5448
pc.TextureCacheInvalidationEnable = true;
5449
pc.ConstantCacheInvalidationEnable = true;
5450
pc.StateCacheInvalidationEnable = true;
5451
pc.InstructionCacheInvalidateEnable = true;
5452
pc.PostSyncOperation = NoWrite;
5453
anv_debug_dump_pc(pc);
5454
}
5455
5456
anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
5457
#if GFX_VER >= 9
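/* The upper byte of PIPELINE_SELECT is a write-enable mask for the low
* bits: 0x3 presumably unmasks the two Pipeline Selection bits, and 0x13
* additionally unmasks bit 4 (Media Sampler DOP Clock Gate Enable) on
* Gfx12.
*/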
5458
ps.MaskBits = GFX_VER >= 12 ? 0x13 : 3;
5459
ps.MediaSamplerDOPClockGateEnable = GFX_VER >= 12;
5460
#endif
5461
ps.PipelineSelection = pipeline;
5462
}
5463
5464
#if GFX_VER == 9
5465
if (devinfo->is_geminilake) {
5466
/* Project: DevGLK
5467
*
5468
* "This chicken bit works around a hardware issue with barrier logic
5469
* encountered when switching between GPGPU and 3D pipelines. To
5470
* workaround the issue, this mode bit should be set after a pipeline
5471
* is selected."
5472
*/
5473
anv_batch_write_reg(&cmd_buffer->batch, GENX(SLICE_COMMON_ECO_CHICKEN1), scec1) {
5474
scec1.GLKBarrierMode = pipeline == GPGPU ? GLK_BARRIER_MODE_GPGPU
5475
: GLK_BARRIER_MODE_3D_HULL;
5476
scec1.GLKBarrierModeMask = 1;
5477
}
5478
}
5479
#endif
5480
5481
cmd_buffer->state.current_pipeline = pipeline;
5482
}
5483
5484
void
5485
genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
5486
{
5487
genX(flush_pipeline_select)(cmd_buffer, _3D);
5488
}
5489
5490
void
5491
genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
5492
{
5493
genX(flush_pipeline_select)(cmd_buffer, GPGPU);
5494
}
5495
5496
void
5497
genX(cmd_buffer_emit_gfx7_depth_flush)(struct anv_cmd_buffer *cmd_buffer)
5498
{
5499
if (GFX_VER >= 8)
5500
return;
5501
5502
/* From the Haswell PRM, documentation for 3DSTATE_DEPTH_BUFFER:
5503
*
5504
* "Restriction: Prior to changing Depth/Stencil Buffer state (i.e., any
5505
* combination of 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS,
5506
* 3DSTATE_STENCIL_BUFFER, 3DSTATE_HIER_DEPTH_BUFFER) SW must first
5507
* issue a pipelined depth stall (PIPE_CONTROL with Depth Stall bit
5508
* set), followed by a pipelined depth cache flush (PIPE_CONTROL with
5509
* Depth Flush Bit set, followed by another pipelined depth stall
5510
* (PIPE_CONTROL with Depth Stall Bit set), unless SW can otherwise
5511
* guarantee that the pipeline from WM onwards is already flushed (e.g.,
5512
* via a preceding MI_FLUSH)."
5513
*/
5514
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
5515
pipe.DepthStallEnable = true;
5516
anv_debug_dump_pc(pipe);
5517
}
5518
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
5519
pipe.DepthCacheFlushEnable = true;
5520
#if GFX_VER >= 12
5521
pipe.TileCacheFlushEnable = true;
5522
#endif
5523
anv_debug_dump_pc(pipe);
5524
}
5525
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
5526
pipe.DepthStallEnable = true;
5527
anv_debug_dump_pc(pipe);
5528
}
5529
}
5530
5531
/* From the Skylake PRM, 3DSTATE_VERTEX_BUFFERS:
5532
*
5533
* "The VF cache needs to be invalidated before binding and then using
5534
* Vertex Buffers that overlap with any previously bound Vertex Buffer
5535
* (at a 64B granularity) since the last invalidation. A VF cache
5536
* invalidate is performed by setting the "VF Cache Invalidation Enable"
5537
* bit in PIPE_CONTROL."
5538
*
5539
* This is implemented by carefully tracking all vertex and index buffer
5540
* bindings and flushing if the cache ever ends up with a range in the cache
5541
* that would exceed 4 GiB. This is implemented in three parts:
5542
*
5543
* 1. genX(cmd_buffer_set_binding_for_gfx8_vb_flush)() which must be called
5544
* every time a 3DSTATE_VERTEX_BUFFER packet is emitted and informs the
5545
* tracking code of the new binding. If this new binding would cause
5546
* the cache to have a too-large range on the next draw call, a pipeline
5547
* stall and VF cache invalidate are added to pending_pipeline_bits.
5548
*
5549
* 2. genX(cmd_buffer_apply_pipe_flushes)() resets the cache tracking to
5550
* empty whenever we emit a VF invalidate.
5551
*
5552
* 3. genX(cmd_buffer_update_dirty_vbs_for_gfx8_vb_flush)() must be called
5553
* after every 3DPRIMITIVE and copies the bound range into the dirty
5554
* range for each used buffer. This has to be a separate step because
5555
* we don't always re-bind all buffers and so 1. can't know which
5556
* buffers are actually bound.
5557
*/
5558
void
5559
genX(cmd_buffer_set_binding_for_gfx8_vb_flush)(struct anv_cmd_buffer *cmd_buffer,
5560
int vb_index,
5561
struct anv_address vb_address,
5562
uint32_t vb_size)
5563
{
5564
if (GFX_VER < 8 || GFX_VER > 9 ||
5565
!anv_use_softpin(cmd_buffer->device->physical))
5566
return;
5567
5568
struct anv_vb_cache_range *bound, *dirty;
5569
if (vb_index == -1) {
5570
bound = &cmd_buffer->state.gfx.ib_bound_range;
5571
dirty = &cmd_buffer->state.gfx.ib_dirty_range;
5572
} else {
5573
assert(vb_index >= 0);
5574
assert(vb_index < ARRAY_SIZE(cmd_buffer->state.gfx.vb_bound_ranges));
5575
assert(vb_index < ARRAY_SIZE(cmd_buffer->state.gfx.vb_dirty_ranges));
5576
bound = &cmd_buffer->state.gfx.vb_bound_ranges[vb_index];
5577
dirty = &cmd_buffer->state.gfx.vb_dirty_ranges[vb_index];
5578
}
5579
5580
if (vb_size == 0) {
5581
bound->start = 0;
5582
bound->end = 0;
5583
return;
5584
}
5585
5586
assert(vb_address.bo && (vb_address.bo->flags & EXEC_OBJECT_PINNED));
5587
bound->start = intel_48b_address(anv_address_physical(vb_address));
5588
bound->end = bound->start + vb_size;
5589
assert(bound->end > bound->start); /* No overflow */
5590
5591
/* Align everything to a cache line */
5592
bound->start &= ~(64ull - 1ull);
5593
bound->end = align_u64(bound->end, 64);
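/* 64 bytes here matches the "64B granularity" called out in the PRM quote
* at the top of this block, so two bindings that share a cache line are
* treated as overlapping.
*/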
5594
5595
/* Compute the dirty range */
5596
dirty->start = MIN2(dirty->start, bound->start);
5597
dirty->end = MAX2(dirty->end, bound->end);
5598
5599
/* If our range is larger than 32 bits, we have to flush */
5600
assert(bound->end - bound->start <= (1ull << 32));
5601
if (dirty->end - dirty->start > (1ull << 32)) {
5602
anv_add_pending_pipe_bits(cmd_buffer,
5603
ANV_PIPE_CS_STALL_BIT |
5604
ANV_PIPE_VF_CACHE_INVALIDATE_BIT,
5605
"vb > 32b range");
5606
}
5607
}
5608
5609
void
5610
genX(cmd_buffer_update_dirty_vbs_for_gfx8_vb_flush)(struct anv_cmd_buffer *cmd_buffer,
5611
uint32_t access_type,
5612
uint64_t vb_used)
5613
{
5614
if (GFX_VER < 8 || GFX_VER > 9 ||
5615
!anv_use_softpin(cmd_buffer->device->physical))
5616
return;
5617
5618
if (access_type == RANDOM) {
5619
/* We have an index buffer */
5620
struct anv_vb_cache_range *bound = &cmd_buffer->state.gfx.ib_bound_range;
5621
struct anv_vb_cache_range *dirty = &cmd_buffer->state.gfx.ib_dirty_range;
5622
5623
if (bound->end > bound->start) {
5624
dirty->start = MIN2(dirty->start, bound->start);
5625
dirty->end = MAX2(dirty->end, bound->end);
5626
}
5627
}
5628
5629
uint64_t mask = vb_used;
5630
while (mask) {
5631
int i = u_bit_scan64(&mask);
5632
assert(i >= 0);
5633
assert(i < ARRAY_SIZE(cmd_buffer->state.gfx.vb_bound_ranges));
5634
assert(i < ARRAY_SIZE(cmd_buffer->state.gfx.vb_dirty_ranges));
5635
5636
struct anv_vb_cache_range *bound, *dirty;
5637
bound = &cmd_buffer->state.gfx.vb_bound_ranges[i];
5638
dirty = &cmd_buffer->state.gfx.vb_dirty_ranges[i];
5639
5640
if (bound->end > bound->start) {
5641
dirty->start = MIN2(dirty->start, bound->start);
5642
dirty->end = MAX2(dirty->end, bound->end);
5643
}
5644
}
5645
}
5646
5647
/**
5648
* Update the pixel hashing modes that determine the balancing of PS threads
5649
* across subslices and slices.
5650
*
5651
* \param width Width bound of the rendering area (already scaled down if \p
5652
* scale is greater than 1).
5653
* \param height Height bound of the rendering area (already scaled down if \p
5654
* scale is greater than 1).
5655
* \param scale The number of framebuffer samples that could potentially be
5656
* affected by an individual channel of the PS thread. This is
5657
* typically one for single-sampled rendering, but for operations
5658
* like CCS resolves and fast clears a single PS invocation may
5659
* update a huge number of pixels, in which case a finer
5660
* balancing is desirable in order to maximally utilize the
5661
* bandwidth available. UINT_MAX can be used as shorthand for
5662
* "finest hashing mode available".
5663
*/
5664
void
5665
genX(cmd_buffer_emit_hashing_mode)(struct anv_cmd_buffer *cmd_buffer,
5666
unsigned width, unsigned height,
5667
unsigned scale)
5668
{
5669
#if GFX_VER == 9
5670
const struct intel_device_info *devinfo = &cmd_buffer->device->info;
5671
const unsigned slice_hashing[] = {
5672
/* Because all Gfx9 platforms with more than one slice require
5673
* three-way subslice hashing, a single "normal" 16x16 slice hashing
5674
* block is guaranteed to suffer from substantial imbalance, with one
5675
* subslice receiving twice as much work as the other two in the
5676
* slice.
5677
*
5678
* The performance impact of that would be particularly severe when
5679
* three-way hashing is also in use for slice balancing (which is the
5680
* case for all Gfx9 GT4 platforms), because one of the slices
5681
* receives one every three 16x16 blocks in either direction, which
5682
* is roughly the periodicity of the underlying subslice imbalance
5683
* pattern ("roughly" because in reality the hardware's
5684
* implementation of three-way hashing doesn't do exact modulo 3
5685
* arithmetic, which somewhat decreases the magnitude of this effect
5686
* in practice). This leads to a systematic subslice imbalance
5687
* within that slice regardless of the size of the primitive. The
5688
* 32x32 hashing mode guarantees that the subslice imbalance within a
5689
* single slice hashing block is minimal, largely eliminating this
5690
* effect.
5691
*/
5692
_32x32,
5693
/* Finest slice hashing mode available. */
5694
NORMAL
5695
};
5696
const unsigned subslice_hashing[] = {
5697
/* 16x16 would provide a slight cache locality benefit especially
5698
* visible in the sampler L1 cache efficiency of low-bandwidth
5699
* non-LLC platforms, but it comes at the cost of greater subslice
5700
* imbalance for primitives of dimensions approximately intermediate
5701
* between 16x4 and 16x16.
5702
*/
5703
_16x4,
5704
/* Finest subslice hashing mode available. */
5705
_8x4
5706
};
5707
/* Dimensions of the smallest hashing block of a given hashing mode. If
5708
* the rendering area is smaller than this there can't possibly be any
5709
* benefit from switching to this mode, so we optimize out the
5710
* transition.
5711
*/
5712
const unsigned min_size[][2] = {
5713
{ 16, 4 },
5714
{ 8, 4 }
5715
};
5716
const unsigned idx = scale > 1;
5717
5718
if (cmd_buffer->state.current_hash_scale != scale &&
5719
(width > min_size[idx][0] || height > min_size[idx][1])) {
5720
anv_add_pending_pipe_bits(cmd_buffer,
5721
ANV_PIPE_CS_STALL_BIT |
5722
ANV_PIPE_STALL_AT_SCOREBOARD_BIT,
5723
"change pixel hash mode");
5724
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
5725
5726
anv_batch_write_reg(&cmd_buffer->batch, GENX(GT_MODE), gt) {
5727
gt.SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0);
5728
gt.SliceHashingMask = (devinfo->num_slices > 1 ? -1 : 0);
5729
gt.SubsliceHashing = subslice_hashing[idx];
5730
gt.SubsliceHashingMask = -1;
5731
}
5732
5733
cmd_buffer->state.current_hash_scale = scale;
5734
}
5735
#endif
5736
}
5737
5738
static void
5739
cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
5740
{
5741
struct anv_device *device = cmd_buffer->device;
5742
const struct anv_image_view *iview =
5743
anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
5744
const struct anv_image *image = iview ? iview->image : NULL;
5745
5746
/* FIXME: Width and Height are wrong */
5747
5748
genX(cmd_buffer_emit_gfx7_depth_flush)(cmd_buffer);
5749
5750
uint32_t *dw = anv_batch_emit_dwords(&cmd_buffer->batch,
5751
device->isl_dev.ds.size / 4);
5752
if (dw == NULL)
5753
return;
5754
5755
struct isl_depth_stencil_hiz_emit_info info = { };
5756
5757
if (iview)
5758
info.view = &iview->planes[0].isl;
5759
5760
if (image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
5761
uint32_t depth_plane =
5762
anv_image_aspect_to_plane(image->aspects, VK_IMAGE_ASPECT_DEPTH_BIT);
5763
const struct anv_surface *depth_surface =
5764
&image->planes[depth_plane].primary_surface;
5765
const struct anv_address depth_address =
5766
anv_image_address(image, &depth_surface->memory_range);
5767
5768
info.depth_surf = &depth_surface->isl;
5769
5770
info.depth_address =
5771
anv_batch_emit_reloc(&cmd_buffer->batch,
5772
dw + device->isl_dev.ds.depth_offset / 4,
5773
depth_address.bo, depth_address.offset);
5774
info.mocs =
5775
anv_mocs(device, depth_address.bo, ISL_SURF_USAGE_DEPTH_BIT);
5776
5777
const uint32_t ds =
5778
cmd_buffer->state.subpass->depth_stencil_attachment->attachment;
5779
info.hiz_usage = cmd_buffer->state.attachments[ds].aux_usage;
5780
if (info.hiz_usage != ISL_AUX_USAGE_NONE) {
5781
assert(isl_aux_usage_has_hiz(info.hiz_usage));
5782
5783
const struct anv_surface *hiz_surface =
5784
&image->planes[depth_plane].aux_surface;
5785
const struct anv_address hiz_address =
5786
anv_image_address(image, &hiz_surface->memory_range);
5787
5788
info.hiz_surf = &hiz_surface->isl;
5789
5790
info.hiz_address =
5791
anv_batch_emit_reloc(&cmd_buffer->batch,
5792
dw + device->isl_dev.ds.hiz_offset / 4,
5793
hiz_address.bo, hiz_address.offset);
5794
5795
info.depth_clear_value = ANV_HZ_FC_VAL;
5796
}
5797
}
5798
5799
if (image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT)) {
5800
uint32_t stencil_plane =
5801
anv_image_aspect_to_plane(image->aspects, VK_IMAGE_ASPECT_STENCIL_BIT);
5802
const struct anv_surface *stencil_surface =
5803
&image->planes[stencil_plane].primary_surface;
5804
const struct anv_address stencil_address =
5805
anv_image_address(image, &stencil_surface->memory_range);
5806
5807
info.stencil_surf = &stencil_surface->isl;
5808
5809
info.stencil_aux_usage = image->planes[stencil_plane].aux_usage;
5810
info.stencil_address =
5811
anv_batch_emit_reloc(&cmd_buffer->batch,
5812
dw + device->isl_dev.ds.stencil_offset / 4,
5813
stencil_address.bo, stencil_address.offset);
5814
info.mocs =
5815
anv_mocs(device, stencil_address.bo, ISL_SURF_USAGE_STENCIL_BIT);
5816
}
5817
5818
isl_emit_depth_stencil_hiz_s(&device->isl_dev, dw, &info);
5819
5820
if (GFX_VER >= 12) {
5821
cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
5822
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
5823
5824
/* Wa_1408224581
5825
*
5826
* Workaround (Gfx12LP A-step only): an additional PIPE_CONTROL with a
* post-sync store-dword operation is required. That is, emit an extra
* PIPE_CONTROL after the depth/stencil state whenever the surface state
* bits of this state change.
5830
*/
5831
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
5832
pc.PostSyncOperation = WriteImmediateData;
5833
pc.Address = cmd_buffer->device->workaround_address;
5834
}
5835
}
5836
cmd_buffer->state.hiz_enabled = isl_aux_usage_has_hiz(info.hiz_usage);
5837
}
5838
5839
/**
5840
* This ANDs the view mask of the current subpass with the pending clear
5841
* views in the attachment to get the mask of views active in the subpass
5842
* that still need to be cleared.
5843
*/
5844
static inline uint32_t
5845
get_multiview_subpass_clear_mask(const struct anv_cmd_state *cmd_state,
5846
const struct anv_attachment_state *att_state)
5847
{
5848
return cmd_state->subpass->view_mask & att_state->pending_clear_views;
5849
}
5850
5851
static inline bool
5852
do_first_layer_clear(const struct anv_cmd_state *cmd_state,
5853
const struct anv_attachment_state *att_state)
5854
{
5855
if (!cmd_state->subpass->view_mask)
5856
return true;
5857
5858
uint32_t pending_clear_mask =
5859
get_multiview_subpass_clear_mask(cmd_state, att_state);
5860
5861
return pending_clear_mask & 1;
5862
}
5863
5864
static inline bool
5865
current_subpass_is_last_for_attachment(const struct anv_cmd_state *cmd_state,
5866
uint32_t att_idx)
5867
{
5868
const uint32_t last_subpass_idx =
5869
cmd_state->pass->attachments[att_idx].last_subpass_idx;
5870
const struct anv_subpass *last_subpass =
5871
&cmd_state->pass->subpasses[last_subpass_idx];
5872
return last_subpass == cmd_state->subpass;
5873
}
5874
5875
static void
5876
cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer,
5877
uint32_t subpass_id)
5878
{
5879
struct anv_cmd_state *cmd_state = &cmd_buffer->state;
5880
struct anv_render_pass *pass = cmd_state->pass;
5881
struct anv_subpass *subpass = &pass->subpasses[subpass_id];
5882
cmd_state->subpass = subpass;
5883
5884
cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
5885
5886
/* Our implementation of VK_KHR_multiview uses instancing to draw the
5887
* different views. If the client asks for instancing, we need to use the
5888
* Instance Data Step Rate to ensure that we repeat the client's
5889
* per-instance data once for each view. Since this bit is in
5890
* VERTEX_BUFFER_STATE on gfx7, we need to dirty vertex buffers at the top
5891
* of each subpass.
5892
*/
5893
if (GFX_VER == 7)
5894
cmd_buffer->state.gfx.vb_dirty |= ~0;
5895
5896
/* It is possible to start a render pass with an old pipeline. Because the
5897
* render pass and subpass index are both baked into the pipeline, this is
5898
* highly unlikely. In order to do so, it requires that you have a render
5899
* pass with a single subpass and that you use that render pass twice
5900
* back-to-back and use the same pipeline at the start of the second render
5901
* pass as at the end of the first. In order to avoid unpredictable issues
5902
* with this edge case, we just dirty the pipeline at the start of every
5903
* subpass.
5904
*/
5905
cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
5906
5907
/* Accumulate any subpass flushes that need to happen before the subpass */
5908
anv_add_pending_pipe_bits(cmd_buffer,
5909
cmd_buffer->state.pass->subpass_flushes[subpass_id],
5910
"begin subpass deps/attachments");
5911
5912
VkRect2D render_area = cmd_buffer->state.render_area;
5913
struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
5914
5915
bool is_multiview = subpass->view_mask != 0;
5916
5917
for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
5918
const uint32_t a = subpass->attachments[i].attachment;
5919
if (a == VK_ATTACHMENT_UNUSED)
5920
continue;
5921
5922
assert(a < cmd_state->pass->attachment_count);
5923
struct anv_attachment_state *att_state = &cmd_state->attachments[a];
5924
5925
struct anv_image_view *iview = cmd_state->attachments[a].image_view;
5926
const struct anv_image *image = iview->image;
5927
5928
VkImageLayout target_layout = subpass->attachments[i].layout;
5929
VkImageLayout target_stencil_layout =
5930
subpass->attachments[i].stencil_layout;
5931
5932
uint32_t level = iview->planes[0].isl.base_level;
5933
uint32_t width = anv_minify(iview->image->extent.width, level);
5934
uint32_t height = anv_minify(iview->image->extent.height, level);
5935
bool full_surface_draw =
5936
render_area.offset.x == 0 && render_area.offset.y == 0 &&
5937
render_area.extent.width == width &&
5938
render_area.extent.height == height;
5939
5940
uint32_t base_layer, layer_count;
5941
if (image->type == VK_IMAGE_TYPE_3D) {
5942
base_layer = 0;
5943
layer_count = anv_minify(iview->image->extent.depth, level);
5944
} else {
5945
base_layer = iview->planes[0].isl.base_array_layer;
5946
layer_count = fb->layers;
5947
}
5948
5949
if (image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
5950
bool will_full_fast_clear =
5951
(att_state->pending_clear_aspects & VK_IMAGE_ASPECT_COLOR_BIT) &&
5952
att_state->fast_clear && full_surface_draw;
5953
5954
assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
5955
transition_color_buffer(cmd_buffer, image, VK_IMAGE_ASPECT_COLOR_BIT,
5956
level, 1, base_layer, layer_count,
5957
att_state->current_layout, target_layout,
5958
VK_QUEUE_FAMILY_IGNORED,
5959
VK_QUEUE_FAMILY_IGNORED,
5960
will_full_fast_clear);
5961
att_state->aux_usage =
5962
anv_layout_to_aux_usage(&cmd_buffer->device->info, image,
5963
VK_IMAGE_ASPECT_COLOR_BIT,
5964
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
5965
target_layout);
5966
}
5967
5968
if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
5969
bool will_full_fast_clear =
5970
(att_state->pending_clear_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
5971
att_state->fast_clear && full_surface_draw;
5972
5973
transition_depth_buffer(cmd_buffer, image,
5974
base_layer, layer_count,
5975
att_state->current_layout, target_layout,
5976
will_full_fast_clear);
5977
att_state->aux_usage =
5978
anv_layout_to_aux_usage(&cmd_buffer->device->info, image,
5979
VK_IMAGE_ASPECT_DEPTH_BIT,
5980
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
5981
target_layout);
5982
}
5983
5984
if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
5985
bool will_full_fast_clear =
5986
(att_state->pending_clear_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
5987
att_state->fast_clear && full_surface_draw;
5988
5989
transition_stencil_buffer(cmd_buffer, image,
5990
level, 1, base_layer, layer_count,
5991
att_state->current_stencil_layout,
5992
target_stencil_layout,
5993
will_full_fast_clear);
5994
}
5995
att_state->current_layout = target_layout;
5996
att_state->current_stencil_layout = target_stencil_layout;
5997
5998
if (att_state->pending_clear_aspects & VK_IMAGE_ASPECT_COLOR_BIT) {
5999
assert(att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT);
6000
6001
/* Multi-planar images are not supported as attachments */
6002
assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
6003
assert(image->n_planes == 1);
6004
6005
uint32_t base_clear_layer = iview->planes[0].isl.base_array_layer;
6006
uint32_t clear_layer_count = fb->layers;
6007
6008
if (att_state->fast_clear &&
6009
do_first_layer_clear(cmd_state, att_state)) {
6010
/* We only support fast-clears on the first layer */
6011
assert(level == 0 && base_layer == 0);
6012
6013
union isl_color_value clear_color = {};
6014
anv_clear_color_from_att_state(&clear_color, att_state, iview);
6015
if (iview->image->samples == 1) {
6016
anv_image_ccs_op(cmd_buffer, image,
6017
iview->planes[0].isl.format,
6018
iview->planes[0].isl.swizzle,
6019
VK_IMAGE_ASPECT_COLOR_BIT,
6020
0, 0, 1, ISL_AUX_OP_FAST_CLEAR,
6021
&clear_color,
6022
false);
6023
} else {
6024
anv_image_mcs_op(cmd_buffer, image,
6025
iview->planes[0].isl.format,
6026
iview->planes[0].isl.swizzle,
6027
VK_IMAGE_ASPECT_COLOR_BIT,
6028
0, 1, ISL_AUX_OP_FAST_CLEAR,
6029
&clear_color,
6030
false);
6031
}
6032
base_clear_layer++;
6033
clear_layer_count--;
6034
if (is_multiview)
6035
att_state->pending_clear_views &= ~1;
6036
6037
if (isl_color_value_is_zero(clear_color,
6038
iview->planes[0].isl.format)) {
6039
/* This image has the auxiliary buffer enabled. We can mark the
6040
* subresource as not needing a resolve because the clear color
6041
* will match what's in every RENDER_SURFACE_STATE object when
6042
* it's being used for sampling.
6043
*/
6044
set_image_fast_clear_state(cmd_buffer, iview->image,
6045
VK_IMAGE_ASPECT_COLOR_BIT,
6046
ANV_FAST_CLEAR_DEFAULT_VALUE);
6047
} else {
6048
set_image_fast_clear_state(cmd_buffer, iview->image,
6049
VK_IMAGE_ASPECT_COLOR_BIT,
6050
ANV_FAST_CLEAR_ANY);
6051
}
6052
}
6053
6054
/* From the VkFramebufferCreateInfo spec:
6055
*
6056
* "If the render pass uses multiview, then layers must be one and each
6057
* attachment requires a number of layers that is greater than the
6058
* maximum bit index set in the view mask in the subpasses in which it
6059
* is used."
6060
*
6061
* So if multiview is active we ignore the number of layers in the
6062
* framebuffer and instead we honor the view mask from the subpass.
6063
*/
6064
if (is_multiview) {
6065
assert(image->n_planes == 1);
6066
uint32_t pending_clear_mask =
6067
get_multiview_subpass_clear_mask(cmd_state, att_state);
6068
6069
u_foreach_bit(layer_idx, pending_clear_mask) {
6070
uint32_t layer =
6071
iview->planes[0].isl.base_array_layer + layer_idx;
6072
6073
anv_image_clear_color(cmd_buffer, image,
6074
VK_IMAGE_ASPECT_COLOR_BIT,
6075
att_state->aux_usage,
6076
iview->planes[0].isl.format,
6077
iview->planes[0].isl.swizzle,
6078
level, layer, 1,
6079
render_area,
6080
vk_to_isl_color(att_state->clear_value.color));
6081
}
6082
6083
att_state->pending_clear_views &= ~pending_clear_mask;
6084
} else if (clear_layer_count > 0) {
6085
assert(image->n_planes == 1);
6086
anv_image_clear_color(cmd_buffer, image, VK_IMAGE_ASPECT_COLOR_BIT,
6087
att_state->aux_usage,
6088
iview->planes[0].isl.format,
6089
iview->planes[0].isl.swizzle,
6090
level, base_clear_layer, clear_layer_count,
6091
render_area,
6092
vk_to_isl_color(att_state->clear_value.color));
6093
}
6094
} else if (att_state->pending_clear_aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
6095
VK_IMAGE_ASPECT_STENCIL_BIT)) {
6096
if (att_state->fast_clear &&
6097
(att_state->pending_clear_aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
6098
/* We currently only support HiZ for single-LOD images */
6099
assert(isl_aux_usage_has_hiz(iview->image->planes[0].aux_usage));
6100
assert(iview->planes[0].isl.base_level == 0);
6101
assert(iview->planes[0].isl.levels == 1);
6102
}
6103
6104
if (is_multiview) {
6105
uint32_t pending_clear_mask =
6106
get_multiview_subpass_clear_mask(cmd_state, att_state);
6107
6108
u_foreach_bit(layer_idx, pending_clear_mask) {
6109
uint32_t layer =
6110
iview->planes[0].isl.base_array_layer + layer_idx;
6111
6112
if (att_state->fast_clear) {
6113
anv_image_hiz_clear(cmd_buffer, image,
6114
att_state->pending_clear_aspects,
6115
level, layer, 1, render_area,
6116
att_state->clear_value.depthStencil.stencil);
6117
} else {
6118
anv_image_clear_depth_stencil(cmd_buffer, image,
6119
att_state->pending_clear_aspects,
6120
att_state->aux_usage,
6121
level, layer, 1, render_area,
6122
att_state->clear_value.depthStencil.depth,
6123
att_state->clear_value.depthStencil.stencil);
6124
}
6125
}
6126
6127
att_state->pending_clear_views &= ~pending_clear_mask;
6128
} else {
6129
if (att_state->fast_clear) {
6130
anv_image_hiz_clear(cmd_buffer, image,
6131
att_state->pending_clear_aspects,
6132
level, base_layer, layer_count,
6133
render_area,
6134
att_state->clear_value.depthStencil.stencil);
6135
} else {
6136
anv_image_clear_depth_stencil(cmd_buffer, image,
6137
att_state->pending_clear_aspects,
6138
att_state->aux_usage,
6139
level, base_layer, layer_count,
6140
render_area,
6141
att_state->clear_value.depthStencil.depth,
6142
att_state->clear_value.depthStencil.stencil);
6143
}
6144
}
6145
} else {
6146
assert(att_state->pending_clear_aspects == 0);
6147
}
6148
6149
/* If multiview is enabled, then we are only done clearing when we no
6150
* longer have pending layers to clear, or when we have processed the
6151
* last subpass that uses this attachment.
6152
*/
6153
if (!is_multiview ||
6154
att_state->pending_clear_views == 0 ||
6155
current_subpass_is_last_for_attachment(cmd_state, a)) {
6156
att_state->pending_clear_aspects = 0;
6157
}
6158
6159
att_state->pending_load_aspects = 0;
6160
}
6161
6162
/* We've transitioned all our images, possibly fast-clearing them. Now we
6163
* can fill out the surface states that we will use as render targets
6164
* during actual subpass rendering.
6165
*/
6166
VkResult result = genX(cmd_buffer_alloc_att_surf_states)(cmd_buffer,
6167
pass, subpass);
6168
if (result != VK_SUCCESS)
6169
return;
6170
6171
isl_null_fill_state(&cmd_buffer->device->isl_dev,
6172
cmd_state->null_surface_state.map,
6173
.size = isl_extent3d(fb->width, fb->height, fb->layers));
6174
6175
for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
6176
const uint32_t att = subpass->attachments[i].attachment;
6177
if (att == VK_ATTACHMENT_UNUSED)
6178
continue;
6179
6180
assert(att < cmd_state->pass->attachment_count);
6181
struct anv_render_pass_attachment *pass_att = &pass->attachments[att];
6182
struct anv_attachment_state *att_state = &cmd_state->attachments[att];
6183
struct anv_image_view *iview = att_state->image_view;
6184
6185
if (!vk_format_is_color(pass_att->format))
6186
continue;
6187
6188
const VkImageUsageFlagBits att_usage = subpass->attachments[i].usage;
6189
assert(util_bitcount(att_usage) == 1);
6190
6191
struct anv_surface_state *surface_state;
6192
isl_surf_usage_flags_t isl_surf_usage;
6193
enum isl_aux_usage isl_aux_usage;
6194
if (att_usage == VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
6195
surface_state = &att_state->color;
6196
isl_surf_usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
6197
isl_aux_usage = att_state->aux_usage;
6198
} else if (att_usage == VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) {
6199
surface_state = &att_state->input;
6200
isl_surf_usage = ISL_SURF_USAGE_TEXTURE_BIT;
6201
isl_aux_usage =
6202
anv_layout_to_aux_usage(&cmd_buffer->device->info, iview->image,
6203
VK_IMAGE_ASPECT_COLOR_BIT,
6204
VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
6205
att_state->current_layout);
6206
} else {
6207
continue;
6208
}
6209
6210
/* We had better have a surface state when we get here */
6211
assert(surface_state->state.map);
6212
6213
union isl_color_value clear_color = { .u32 = { 0, } };
6214
if (pass_att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR &&
6215
att_state->fast_clear)
6216
anv_clear_color_from_att_state(&clear_color, att_state, iview);
6217
6218
anv_image_fill_surface_state(cmd_buffer->device,
6219
iview->image,
6220
VK_IMAGE_ASPECT_COLOR_BIT,
6221
&iview->planes[0].isl,
6222
isl_surf_usage,
6223
isl_aux_usage,
6224
&clear_color,
6225
0,
6226
surface_state,
6227
NULL);
6228
6229
add_surface_state_relocs(cmd_buffer, *surface_state);
6230
6231
if (GFX_VER < 10 &&
6232
pass_att->load_op == VK_ATTACHMENT_LOAD_OP_LOAD &&
6233
iview->image->planes[0].aux_usage != ISL_AUX_USAGE_NONE &&
6234
iview->planes[0].isl.base_level == 0 &&
6235
iview->planes[0].isl.base_array_layer == 0) {
6236
genX(copy_fast_clear_dwords)(cmd_buffer, surface_state->state,
6237
iview->image,
6238
VK_IMAGE_ASPECT_COLOR_BIT,
6239
false /* copy to ss */);
6240
}
6241
}
6242
6243
#if GFX_VER >= 11
6244
/* The PIPE_CONTROL command description says:
6245
*
6246
* "Whenever a Binding Table Index (BTI) used by a Render Target Message
6247
* points to a different RENDER_SURFACE_STATE, SW must issue a Render
6248
* Target Cache Flush by enabling this bit. When render target flush
6249
* is set due to new association of BTI, PS Scoreboard Stall bit must
6250
* be set in this packet."
6251
*/
6252
anv_add_pending_pipe_bits(cmd_buffer,
6253
ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
6254
ANV_PIPE_STALL_AT_SCOREBOARD_BIT,
6255
"change RT");
6256
#endif
6257
6258
#if GFX_VERx10 == 120
6259
/* Wa_14010455700
6260
*
6261
* ISL will change some CHICKEN registers depending on the depth surface
6262
* format, along with emitting the depth and stencil packets. In that case,
6263
* we want to do a depth flush and stall, so the pipeline is not using these
6264
* settings while we change the registers.
6265
*/
6266
anv_add_pending_pipe_bits(cmd_buffer,
6267
ANV_PIPE_DEPTH_CACHE_FLUSH_BIT |
6268
ANV_PIPE_DEPTH_STALL_BIT |
6269
ANV_PIPE_END_OF_PIPE_SYNC_BIT,
6270
"change DS");
6271
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
6272
#endif
6273
6274
cmd_buffer_emit_depth_stencil(cmd_buffer);
6275
}
6276
6277
static enum blorp_filter
6278
vk_to_blorp_resolve_mode(VkResolveModeFlagBitsKHR vk_mode)
6279
{
6280
switch (vk_mode) {
6281
case VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR:
6282
return BLORP_FILTER_SAMPLE_0;
6283
case VK_RESOLVE_MODE_AVERAGE_BIT_KHR:
6284
return BLORP_FILTER_AVERAGE;
6285
case VK_RESOLVE_MODE_MIN_BIT_KHR:
6286
return BLORP_FILTER_MIN_SAMPLE;
6287
case VK_RESOLVE_MODE_MAX_BIT_KHR:
6288
return BLORP_FILTER_MAX_SAMPLE;
6289
default:
6290
return BLORP_FILTER_NONE;
6291
}
6292
}
6293
6294
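/* Illustrative sketch, not used by the driver: a hypothetical helper showing
 * how a caller can treat BLORP_FILTER_NONE (the fallback above) as "no
 * resolve requested" when validating a depth/stencil resolve mode.
 */
static inline bool
anv_resolve_mode_is_supported_sketch(VkResolveModeFlagBitsKHR vk_mode)
{
   return vk_to_blorp_resolve_mode(vk_mode) != BLORP_FILTER_NONE;
}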
static void
6295
cmd_buffer_end_subpass(struct anv_cmd_buffer *cmd_buffer)
6296
{
6297
struct anv_cmd_state *cmd_state = &cmd_buffer->state;
6298
struct anv_subpass *subpass = cmd_state->subpass;
6299
uint32_t subpass_id = anv_get_subpass_id(&cmd_buffer->state);
6300
struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
6301
6302
/* We are done with the previous subpass and all rendering directly to that
6303
* subpass is now complete. Zero out all the surface states so we don't
6304
* accidentally use them between now and the next subpass.
6305
*/
6306
for (uint32_t i = 0; i < cmd_state->pass->attachment_count; ++i) {
6307
memset(&cmd_state->attachments[i].color, 0,
6308
sizeof(cmd_state->attachments[i].color));
6309
memset(&cmd_state->attachments[i].input, 0,
6310
sizeof(cmd_state->attachments[i].input));
6311
}
6312
cmd_state->null_surface_state = ANV_STATE_NULL;
6313
cmd_state->attachment_states = ANV_STATE_NULL;
6314
6315
for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
6316
const uint32_t a = subpass->attachments[i].attachment;
6317
if (a == VK_ATTACHMENT_UNUSED)
6318
continue;
6319
6320
assert(a < cmd_state->pass->attachment_count);
6321
struct anv_attachment_state *att_state = &cmd_state->attachments[a];
6322
struct anv_image_view *iview = att_state->image_view;
6323
6324
assert(util_bitcount(subpass->attachments[i].usage) == 1);
6325
if (subpass->attachments[i].usage ==
6326
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
6327
/* We assume that if we're ending a subpass, some rendering took place,
6328
* so we may end up with compressed data.
6329
*/
6330
genX(cmd_buffer_mark_image_written)(cmd_buffer, iview->image,
6331
VK_IMAGE_ASPECT_COLOR_BIT,
6332
att_state->aux_usage,
6333
iview->planes[0].isl.base_level,
6334
iview->planes[0].isl.base_array_layer,
6335
fb->layers);
6336
} else if (subpass->attachments[i].usage ==
6337
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
6338
/* We may be writing depth or stencil so we need to mark the surface.
6339
* Unfortunately, there's no way to know at this point whether the
6340
* depth or stencil tests used will actually write to the surface.
6341
*
6342
* Even though stencil may be plane 1, it always shares a base_level
6343
* with depth.
6344
*/
6345
const struct isl_view *ds_view = &iview->planes[0].isl;
6346
if (iview->aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) {
6347
genX(cmd_buffer_mark_image_written)(cmd_buffer, iview->image,
6348
VK_IMAGE_ASPECT_DEPTH_BIT,
6349
att_state->aux_usage,
6350
ds_view->base_level,
6351
ds_view->base_array_layer,
6352
fb->layers);
6353
}
6354
if (iview->aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) {
6355
/* Even though stencil may be plane 1, it always shares a
6356
* base_level with depth.
6357
*/
6358
genX(cmd_buffer_mark_image_written)(cmd_buffer, iview->image,
6359
VK_IMAGE_ASPECT_STENCIL_BIT,
6360
ISL_AUX_USAGE_NONE,
6361
ds_view->base_level,
6362
ds_view->base_array_layer,
6363
fb->layers);
6364
}
6365
}
6366
}
6367
6368
if (subpass->has_color_resolve) {
6369
/* We are about to do some MSAA resolves. We need to flush so that the
6370
* results of writes to the MSAA color attachments show up in the sampler
6371
* when we blit to the single-sampled resolve target.
6372
*/
6373
anv_add_pending_pipe_bits(cmd_buffer,
6374
ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
6375
ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT,
6376
"MSAA resolve");
6377
6378
for (uint32_t i = 0; i < subpass->color_count; ++i) {
6379
uint32_t src_att = subpass->color_attachments[i].attachment;
6380
uint32_t dst_att = subpass->resolve_attachments[i].attachment;
6381
6382
if (dst_att == VK_ATTACHMENT_UNUSED)
6383
continue;
6384
6385
assert(src_att < cmd_buffer->state.pass->attachment_count);
6386
assert(dst_att < cmd_buffer->state.pass->attachment_count);
6387
6388
if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
6389
/* From the Vulkan 1.0 spec:
6390
*
6391
* If the first use of an attachment in a render pass is as a
6392
* resolve attachment, then the loadOp is effectively ignored
6393
* as the resolve is guaranteed to overwrite all pixels in the
6394
* render area.
6395
*/
6396
cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
6397
}
6398
6399
struct anv_image_view *src_iview = cmd_state->attachments[src_att].image_view;
6400
struct anv_image_view *dst_iview = cmd_state->attachments[dst_att].image_view;
6401
6402
const VkRect2D render_area = cmd_buffer->state.render_area;
6403
6404
enum isl_aux_usage src_aux_usage =
6405
cmd_buffer->state.attachments[src_att].aux_usage;
6406
enum isl_aux_usage dst_aux_usage =
6407
cmd_buffer->state.attachments[dst_att].aux_usage;
6408
6409
assert(src_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT &&
6410
dst_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT);
6411
6412
anv_image_msaa_resolve(cmd_buffer,
6413
src_iview->image, src_aux_usage,
6414
src_iview->planes[0].isl.base_level,
6415
src_iview->planes[0].isl.base_array_layer,
6416
dst_iview->image, dst_aux_usage,
6417
dst_iview->planes[0].isl.base_level,
6418
dst_iview->planes[0].isl.base_array_layer,
6419
VK_IMAGE_ASPECT_COLOR_BIT,
6420
render_area.offset.x, render_area.offset.y,
6421
render_area.offset.x, render_area.offset.y,
6422
render_area.extent.width,
6423
render_area.extent.height,
6424
fb->layers, BLORP_FILTER_NONE);
6425
}
6426
}
6427
6428
if (subpass->ds_resolve_attachment) {
6429
/* We are about to do some MSAA resolves. We need to flush so that the
6430
* results of writes to the MSAA depth attachments show up in the sampler
6431
* when we blit to the single-sampled resolve target.
6432
*/
6433
anv_add_pending_pipe_bits(cmd_buffer,
6434
ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
6435
ANV_PIPE_DEPTH_CACHE_FLUSH_BIT,
6436
"MSAA resolve");
6437
6438
uint32_t src_att = subpass->depth_stencil_attachment->attachment;
6439
uint32_t dst_att = subpass->ds_resolve_attachment->attachment;
6440
6441
assert(src_att < cmd_buffer->state.pass->attachment_count);
6442
assert(dst_att < cmd_buffer->state.pass->attachment_count);
6443
6444
if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
6445
/* From the Vulkan 1.0 spec:
6446
*
6447
* If the first use of an attachment in a render pass is as a
6448
* resolve attachment, then the loadOp is effectively ignored
6449
* as the resolve is guaranteed to overwrite all pixels in the
6450
* render area.
6451
*/
6452
cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
6453
}
6454
6455
struct anv_image_view *src_iview = cmd_state->attachments[src_att].image_view;
6456
struct anv_image_view *dst_iview = cmd_state->attachments[dst_att].image_view;
6457
6458
const VkRect2D render_area = cmd_buffer->state.render_area;
6459
6460
struct anv_attachment_state *src_state =
6461
&cmd_state->attachments[src_att];
6462
struct anv_attachment_state *dst_state =
6463
&cmd_state->attachments[dst_att];
6464
6465
if ((src_iview->image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
6466
subpass->depth_resolve_mode != VK_RESOLVE_MODE_NONE_KHR) {
6467
6468
/* MSAA resolves sample from the source attachment. Transition the
6469
* depth attachment first to get rid of any HiZ that we may not be
6470
* able to handle.
6471
*/
6472
transition_depth_buffer(cmd_buffer, src_iview->image,
6473
src_iview->planes[0].isl.base_array_layer,
6474
fb->layers,
6475
src_state->current_layout,
6476
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
6477
false /* will_full_fast_clear */);
6478
src_state->aux_usage =
6479
anv_layout_to_aux_usage(&cmd_buffer->device->info, src_iview->image,
6480
VK_IMAGE_ASPECT_DEPTH_BIT,
6481
VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
6482
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
6483
src_state->current_layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
6484
6485
/* MSAA resolves write to the resolve attachment as if it were any
6486
* other transfer op. Transition the resolve attachment accordingly.
6487
*/
6488
VkImageLayout dst_initial_layout = dst_state->current_layout;
6489
6490
/* If our render area is the entire size of the image, we're going to
6491
* blow it all away so we can claim the initial layout is UNDEFINED
6492
* and we'll get a HiZ ambiguate instead of a resolve.
6493
*/
6494
if (dst_iview->image->type != VK_IMAGE_TYPE_3D &&
6495
render_area.offset.x == 0 && render_area.offset.y == 0 &&
6496
render_area.extent.width == dst_iview->extent.width &&
6497
render_area.extent.height == dst_iview->extent.height)
6498
dst_initial_layout = VK_IMAGE_LAYOUT_UNDEFINED;
6499
6500
transition_depth_buffer(cmd_buffer, dst_iview->image,
6501
dst_iview->planes[0].isl.base_array_layer,
6502
fb->layers,
6503
dst_initial_layout,
6504
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
6505
false /* will_full_fast_clear */);
6506
dst_state->aux_usage =
6507
anv_layout_to_aux_usage(&cmd_buffer->device->info, dst_iview->image,
6508
VK_IMAGE_ASPECT_DEPTH_BIT,
6509
VK_IMAGE_USAGE_TRANSFER_DST_BIT,
6510
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
6511
dst_state->current_layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
6512
6513
enum blorp_filter filter =
6514
vk_to_blorp_resolve_mode(subpass->depth_resolve_mode);
6515
6516
anv_image_msaa_resolve(cmd_buffer,
6517
src_iview->image, src_state->aux_usage,
6518
src_iview->planes[0].isl.base_level,
6519
src_iview->planes[0].isl.base_array_layer,
6520
dst_iview->image, dst_state->aux_usage,
6521
dst_iview->planes[0].isl.base_level,
6522
dst_iview->planes[0].isl.base_array_layer,
6523
VK_IMAGE_ASPECT_DEPTH_BIT,
6524
render_area.offset.x, render_area.offset.y,
6525
render_area.offset.x, render_area.offset.y,
6526
render_area.extent.width,
6527
render_area.extent.height,
6528
fb->layers, filter);
6529
}
6530
6531
if ((src_iview->image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
6532
subpass->stencil_resolve_mode != VK_RESOLVE_MODE_NONE_KHR) {
6533
6534
src_state->current_stencil_layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
6535
dst_state->current_stencil_layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
6536
6537
enum isl_aux_usage src_aux_usage = ISL_AUX_USAGE_NONE;
6538
uint32_t plane = anv_image_aspect_to_plane(dst_iview->image->aspects,
6539
VK_IMAGE_ASPECT_STENCIL_BIT);
6540
enum isl_aux_usage dst_aux_usage =
6541
dst_iview->image->planes[plane].aux_usage;
6542
6543
enum blorp_filter filter =
6544
vk_to_blorp_resolve_mode(subpass->stencil_resolve_mode);
6545
6546
anv_image_msaa_resolve(cmd_buffer,
6547
src_iview->image, src_aux_usage,
6548
src_iview->planes[0].isl.base_level,
6549
src_iview->planes[0].isl.base_array_layer,
6550
dst_iview->image, dst_aux_usage,
6551
dst_iview->planes[0].isl.base_level,
6552
dst_iview->planes[0].isl.base_array_layer,
6553
VK_IMAGE_ASPECT_STENCIL_BIT,
6554
render_area.offset.x, render_area.offset.y,
6555
render_area.offset.x, render_area.offset.y,
6556
render_area.extent.width,
6557
render_area.extent.height,
6558
fb->layers, filter);
6559
}
6560
}
6561
6562
#if GFX_VER == 7
6563
/* On gfx7, we have to store a texturable version of the stencil buffer in
6564
* a shadow whenever VK_IMAGE_USAGE_SAMPLED_BIT is set and copy back and
6565
* forth at strategic points. Stencil writes are only allowed in the following
6566
* layouts:
6567
*
6568
* - VK_IMAGE_LAYOUT_GENERAL
6569
* - VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
6570
* - VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
6571
* - VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL
6572
* - VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR
6573
*
6574
* For general, we have no nice opportunity to transition so we do the copy
6575
* to the shadow unconditionally at the end of the subpass. For transfer
6576
* destinations, we can update it as part of the transfer op. For the other
6577
* layouts, we delay the copy until a transition into some other layout.
6578
*/
6579
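/* Illustrative sketch (comment only, not driver code): the layout list above
 * boils down to
 *
 *    stencil_writable = layout == VK_IMAGE_LAYOUT_GENERAL ||
 *                       layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL ||
 *                       layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
 *                       layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
 *                       layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR;
 *
 * and only the GENERAL case is handled below, with an unconditional copy to
 * the shadow surface at the end of the subpass.
 */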
if (subpass->depth_stencil_attachment) {
6580
uint32_t a = subpass->depth_stencil_attachment->attachment;
6581
assert(a != VK_ATTACHMENT_UNUSED);
6582
6583
struct anv_attachment_state *att_state = &cmd_state->attachments[a];
6584
struct anv_image_view *iview = cmd_state->attachments[a].image_view;
6585
const struct anv_image *image = iview->image;
6586
6587
if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
6588
uint32_t plane = anv_image_aspect_to_plane(image->aspects,
6589
VK_IMAGE_ASPECT_STENCIL_BIT);
6590
6591
if (anv_surface_is_valid(&image->planes[plane].shadow_surface) &&
6592
att_state->current_stencil_layout == VK_IMAGE_LAYOUT_GENERAL) {
6593
assert(image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT);
6594
anv_image_copy_to_shadow(cmd_buffer, image,
6595
VK_IMAGE_ASPECT_STENCIL_BIT,
6596
iview->planes[plane].isl.base_level, 1,
6597
iview->planes[plane].isl.base_array_layer,
6598
fb->layers);
6599
}
6600
}
6601
}
6602
#endif /* GFX_VER == 7 */
6603
6604
for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
6605
const uint32_t a = subpass->attachments[i].attachment;
6606
if (a == VK_ATTACHMENT_UNUSED)
6607
continue;
6608
6609
if (cmd_state->pass->attachments[a].last_subpass_idx != subpass_id)
6610
continue;
6611
6612
assert(a < cmd_state->pass->attachment_count);
6613
struct anv_attachment_state *att_state = &cmd_state->attachments[a];
6614
struct anv_image_view *iview = cmd_state->attachments[a].image_view;
6615
const struct anv_image *image = iview->image;
6616
6617
/* Transition the image into the final layout for this render pass */
6618
VkImageLayout target_layout =
6619
cmd_state->pass->attachments[a].final_layout;
6620
VkImageLayout target_stencil_layout =
6621
cmd_state->pass->attachments[a].stencil_final_layout;
6622
6623
uint32_t base_layer, layer_count;
6624
if (image->type == VK_IMAGE_TYPE_3D) {
6625
base_layer = 0;
6626
layer_count = anv_minify(iview->image->extent.depth,
6627
iview->planes[0].isl.base_level);
6628
} else {
6629
base_layer = iview->planes[0].isl.base_array_layer;
6630
layer_count = fb->layers;
6631
}
6632
6633
if (image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
6634
assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
6635
transition_color_buffer(cmd_buffer, image, VK_IMAGE_ASPECT_COLOR_BIT,
6636
iview->planes[0].isl.base_level, 1,
6637
base_layer, layer_count,
6638
att_state->current_layout, target_layout,
6639
VK_QUEUE_FAMILY_IGNORED,
6640
VK_QUEUE_FAMILY_IGNORED,
6641
false /* will_full_fast_clear */);
6642
}
6643
6644
if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
6645
transition_depth_buffer(cmd_buffer, image,
6646
base_layer, layer_count,
6647
att_state->current_layout, target_layout,
6648
false /* will_full_fast_clear */);
6649
}
6650
6651
if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
6652
transition_stencil_buffer(cmd_buffer, image,
6653
iview->planes[0].isl.base_level, 1,
6654
base_layer, layer_count,
6655
att_state->current_stencil_layout,
6656
target_stencil_layout,
6657
false /* will_full_fast_clear */);
6658
}
6659
}
6660
6661
/* Accumulate any subpass flushes that need to happen after the subpass.
6662
* Yes, they do get accumulated twice in the NextSubpass case but since
6663
* genX_CmdNextSubpass just calls end/begin back-to-back, we just end up
6664
* ORing the bits in twice, which is harmless (see the sketch after this function).
6665
*/
6666
anv_add_pending_pipe_bits(cmd_buffer,
6667
cmd_buffer->state.pass->subpass_flushes[subpass_id + 1],
6668
"end subpass deps/attachments");
6669
}
6670
6671
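/* Illustrative sketch, not used by the driver: accumulating the subpass
 * flush bits is a plain bitwise OR, so the double accumulation in the
 * NextSubpass end/begin path described above is a no-op the second time.
 * The helper name is hypothetical.
 */
static inline uint32_t
anv_accumulate_subpass_flushes_sketch(uint32_t pending, uint32_t flushes)
{
   /* (pending | flushes) | flushes == pending | flushes */
   return pending | flushes;
}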
void genX(CmdBeginRenderPass2)(
6672
VkCommandBuffer commandBuffer,
6673
const VkRenderPassBeginInfo* pRenderPassBeginInfo,
6674
const VkSubpassBeginInfoKHR* pSubpassBeginInfo)
6675
{
6676
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6677
ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBeginInfo->renderPass);
6678
ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBeginInfo->framebuffer);
6679
VkResult result;
6680
6681
cmd_buffer->state.framebuffer = framebuffer;
6682
cmd_buffer->state.pass = pass;
6683
cmd_buffer->state.render_area = pRenderPassBeginInfo->renderArea;
6684
6685
anv_measure_beginrenderpass(cmd_buffer);
6686
6687
result = genX(cmd_buffer_setup_attachments)(cmd_buffer, pass,
6688
framebuffer,
6689
pRenderPassBeginInfo);
6690
if (result != VK_SUCCESS) {
6691
assert(anv_batch_has_error(&cmd_buffer->batch));
6692
return;
6693
}
6694
6695
genX(flush_pipeline_select_3d)(cmd_buffer);
6696
6697
cmd_buffer_begin_subpass(cmd_buffer, 0);
6698
}
6699
6700
void genX(CmdNextSubpass2)(
6701
VkCommandBuffer commandBuffer,
6702
const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
6703
const VkSubpassEndInfoKHR* pSubpassEndInfo)
6704
{
6705
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6706
6707
if (anv_batch_has_error(&cmd_buffer->batch))
6708
return;
6709
6710
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
6711
6712
uint32_t prev_subpass = anv_get_subpass_id(&cmd_buffer->state);
6713
cmd_buffer_end_subpass(cmd_buffer);
6714
cmd_buffer_begin_subpass(cmd_buffer, prev_subpass + 1);
6715
}
6716
6717
void genX(CmdEndRenderPass2)(
6718
VkCommandBuffer commandBuffer,
6719
const VkSubpassEndInfoKHR* pSubpassEndInfo)
6720
{
6721
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6722
6723
if (anv_batch_has_error(&cmd_buffer->batch))
6724
return;
6725
6726
cmd_buffer_end_subpass(cmd_buffer);
6727
6728
cmd_buffer->state.hiz_enabled = false;
6729
6730
/* Remove references to render-pass-specific state. This enables us to
6731
* detect whether or not we're in a render pass.
6732
*/
6733
cmd_buffer->state.framebuffer = NULL;
6734
cmd_buffer->state.pass = NULL;
6735
cmd_buffer->state.subpass = NULL;
6736
}
6737
6738
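/* Illustrative sketch, not used by the driver: because CmdEndRenderPass2
 * above NULLs the render pass pointers, "are we inside a render pass?"
 * reduces to a NULL check. The helper name is hypothetical.
 */
static inline bool
anv_cmd_state_in_render_pass_sketch(const struct anv_cmd_state *state)
{
   return state->pass != NULL;
}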
void
6739
genX(cmd_emit_conditional_render_predicate)(struct anv_cmd_buffer *cmd_buffer)
6740
{
6741
#if GFX_VERx10 >= 75
6742
struct mi_builder b;
6743
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
6744
6745
mi_store(&b, mi_reg64(MI_PREDICATE_SRC0),
6746
mi_reg32(ANV_PREDICATE_RESULT_REG));
6747
mi_store(&b, mi_reg64(MI_PREDICATE_SRC1), mi_imm(0));
6748
6749
anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
6750
mip.LoadOperation = LOAD_LOADINV;
6751
mip.CombineOperation = COMBINE_SET;
6752
mip.CompareOperation = COMPARE_SRCS_EQUAL;
6753
}
6754
#endif
6755
}
6756
6757
#if GFX_VERx10 >= 75
6758
void genX(CmdBeginConditionalRenderingEXT)(
6759
VkCommandBuffer commandBuffer,
6760
const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin)
6761
{
6762
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6763
ANV_FROM_HANDLE(anv_buffer, buffer, pConditionalRenderingBegin->buffer);
6764
struct anv_cmd_state *cmd_state = &cmd_buffer->state;
6765
struct anv_address value_address =
6766
anv_address_add(buffer->address, pConditionalRenderingBegin->offset);
6767
6768
const bool isInverted = pConditionalRenderingBegin->flags &
6769
VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT;
6770
6771
cmd_state->conditional_render_enabled = true;
6772
6773
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
6774
6775
struct mi_builder b;
6776
mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
6777
6778
/* Section 19.4 of the Vulkan 1.1.85 spec says:
6779
*
6780
* If the value of the predicate in buffer memory changes
6781
* while conditional rendering is active, the rendering commands
6782
* may be discarded in an implementation-dependent way.
6783
* Some implementations may latch the value of the predicate
6784
* upon beginning conditional rendering while others
6785
* may read it before every rendering command.
6786
*
6787
* So it's perfectly fine to read a value from the buffer once.
6788
*/
6789
struct mi_value value = mi_mem32(value_address);
6790
6791
/* Precompute the predicate result; this is necessary to support secondary
6792
* command buffers, since it is unknown whether conditional rendering is
6793
* inverted when they are populated (see the sketch after this function).
6794
*/
6795
mi_store(&b, mi_reg64(ANV_PREDICATE_RESULT_REG),
6796
isInverted ? mi_uge(&b, mi_imm(0), value) :
6797
mi_ult(&b, mi_imm(0), value));
6798
}
6799
6800
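/* Illustrative sketch, not used by the driver: the Vulkan-level meaning of
 * the value precomputed into ANV_PREDICATE_RESULT_REG above. mi_uge(0, value)
 * is true exactly when value == 0 (unsigned compare) and mi_ult(0, value) is
 * true exactly when value != 0, so rendering proceeds when this returns true.
 * The helper name is hypothetical.
 */
static inline bool
anv_conditional_render_should_draw_sketch(bool inverted, uint32_t value)
{
   return inverted ? (value == 0) : (value != 0);
}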
void genX(CmdEndConditionalRenderingEXT)(
6801
VkCommandBuffer commandBuffer)
6802
{
6803
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6804
struct anv_cmd_state *cmd_state = &cmd_buffer->state;
6805
6806
cmd_state->conditional_render_enabled = false;
6807
}
6808
#endif
6809
6810
/* Set of stage bits which are pipelined, i.e. they get queued by the
6811
* command streamer for later execution (see the sketch after the macro).
6812
*/
6813
#define ANV_PIPELINE_STAGE_PIPELINED_BITS \
6814
(VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | \
6815
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | \
6816
VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | \
6817
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | \
6818
VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | \
6819
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | \
6820
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | \
6821
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | \
6822
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | \
6823
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | \
6824
VK_PIPELINE_STAGE_TRANSFER_BIT | \
6825
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT | \
6826
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT | \
6827
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)
6828
6829
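/* Illustrative sketch, not used by the driver: CmdSetEvent/CmdResetEvent
 * below only add the CS stall and pixel scoreboard stall when the stage mask
 * covers pipelined work, which reduces to this check. The helper name is
 * hypothetical.
 */
static inline bool
anv_stage_mask_is_pipelined_sketch(VkPipelineStageFlags stage_mask)
{
   return (stage_mask & ANV_PIPELINE_STAGE_PIPELINED_BITS) != 0;
}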
void genX(CmdSetEvent)(
6830
VkCommandBuffer commandBuffer,
6831
VkEvent _event,
6832
VkPipelineStageFlags stageMask)
6833
{
6834
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6835
ANV_FROM_HANDLE(anv_event, event, _event);
6836
6837
cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
6838
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
6839
6840
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
6841
if (stageMask & ANV_PIPELINE_STAGE_PIPELINED_BITS) {
6842
pc.StallAtPixelScoreboard = true;
6843
pc.CommandStreamerStallEnable = true;
6844
}
6845
6846
pc.DestinationAddressType = DAT_PPGTT;
6847
pc.PostSyncOperation = WriteImmediateData;
6848
pc.Address = (struct anv_address) {
6849
cmd_buffer->device->dynamic_state_pool.block_pool.bo,
6850
event->state.offset
6851
};
6852
pc.ImmediateData = VK_EVENT_SET;
6853
anv_debug_dump_pc(pc);
6854
}
6855
}
6856
6857
void genX(CmdResetEvent)(
6858
VkCommandBuffer commandBuffer,
6859
VkEvent _event,
6860
VkPipelineStageFlags stageMask)
6861
{
6862
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6863
ANV_FROM_HANDLE(anv_event, event, _event);
6864
6865
cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
6866
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
6867
6868
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
6869
if (stageMask & ANV_PIPELINE_STAGE_PIPELINED_BITS) {
6870
pc.StallAtPixelScoreboard = true;
6871
pc.CommandStreamerStallEnable = true;
6872
}
6873
6874
pc.DestinationAddressType = DAT_PPGTT;
6875
pc.PostSyncOperation = WriteImmediateData;
6876
pc.Address = (struct anv_address) {
6877
cmd_buffer->device->dynamic_state_pool.block_pool.bo,
6878
event->state.offset
6879
};
6880
pc.ImmediateData = VK_EVENT_RESET;
6881
anv_debug_dump_pc(pc);
6882
}
6883
}
6884
6885
void genX(CmdWaitEvents)(
6886
VkCommandBuffer commandBuffer,
6887
uint32_t eventCount,
6888
const VkEvent* pEvents,
6889
VkPipelineStageFlags srcStageMask,
6890
VkPipelineStageFlags destStageMask,
6891
uint32_t memoryBarrierCount,
6892
const VkMemoryBarrier* pMemoryBarriers,
6893
uint32_t bufferMemoryBarrierCount,
6894
const VkBufferMemoryBarrier* pBufferMemoryBarriers,
6895
uint32_t imageMemoryBarrierCount,
6896
const VkImageMemoryBarrier* pImageMemoryBarriers)
6897
{
6898
#if GFX_VER >= 8
6899
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6900
6901
for (uint32_t i = 0; i < eventCount; i++) {
6902
ANV_FROM_HANDLE(anv_event, event, pEvents[i]);
6903
6904
anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT), sem) {
6905
sem.WaitMode = PollingMode;
6906
sem.CompareOperation = COMPARE_SAD_EQUAL_SDD;
6907
sem.SemaphoreDataDword = VK_EVENT_SET;
6908
sem.SemaphoreAddress = (struct anv_address) {
6909
cmd_buffer->device->dynamic_state_pool.block_pool.bo,
6910
event->state.offset
6911
};
6912
}
6913
}
6914
#else
6915
anv_finishme("Implement events on gfx7");
6916
#endif
6917
6918
genX(CmdPipelineBarrier)(commandBuffer, srcStageMask, destStageMask,
6919
false, /* byRegion */
6920
memoryBarrierCount, pMemoryBarriers,
6921
bufferMemoryBarrierCount, pBufferMemoryBarriers,
6922
imageMemoryBarrierCount, pImageMemoryBarriers);
6923
}
6924
6925
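/* Illustrative sketch, not used by the driver: the MI_SEMAPHORE_WAIT emitted
 * above (PollingMode + COMPARE_SAD_EQUAL_SDD) behaves like this CPU-side spin
 * loop, waiting until the dword written by CmdSetEvent equals VK_EVENT_SET.
 * The helper name is hypothetical.
 */
static inline void
anv_cpu_poll_event_sketch(const volatile uint32_t *event_dword)
{
   while (*event_dword != (uint32_t)VK_EVENT_SET)
      ; /* spin, as the command streamer does in polling mode */
}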
VkResult genX(CmdSetPerformanceOverrideINTEL)(
6926
VkCommandBuffer commandBuffer,
6927
const VkPerformanceOverrideInfoINTEL* pOverrideInfo)
6928
{
6929
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6930
6931
switch (pOverrideInfo->type) {
6932
case VK_PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL: {
6933
#if GFX_VER >= 9
6934
anv_batch_write_reg(&cmd_buffer->batch, GENX(CS_DEBUG_MODE2), csdm2) {
6935
csdm2._3DRenderingInstructionDisable = pOverrideInfo->enable;
6936
csdm2.MediaInstructionDisable = pOverrideInfo->enable;
6937
csdm2._3DRenderingInstructionDisableMask = true;
6938
csdm2.MediaInstructionDisableMask = true;
6939
}
6940
#else
6941
anv_batch_write_reg(&cmd_buffer->batch, GENX(INSTPM), instpm) {
6942
instpm._3DRenderingInstructionDisable = pOverrideInfo->enable;
6943
instpm.MediaInstructionDisable = pOverrideInfo->enable;
6944
instpm._3DRenderingInstructionDisableMask = true;
6945
instpm.MediaInstructionDisableMask = true;
6946
}
6947
#endif
6948
break;
6949
}
6950
6951
case VK_PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL:
6952
if (pOverrideInfo->enable) {
6953
/* FLUSH ALL THE THINGS! As requested by the MDAPI team. */
6954
anv_add_pending_pipe_bits(cmd_buffer,
6955
ANV_PIPE_FLUSH_BITS |
6956
ANV_PIPE_INVALIDATE_BITS,
6957
"perf counter isolation");
6958
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
6959
}
6960
break;
6961
6962
default:
6963
unreachable("Invalid override");
6964
}
6965
6966
return VK_SUCCESS;
6967
}
6968
6969
VkResult genX(CmdSetPerformanceStreamMarkerINTEL)(
6970
VkCommandBuffer commandBuffer,
6971
const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo)
6972
{
6973
/* TODO: Wait for the register write to complete; this might depend on the generation. */
6974
6975
return VK_SUCCESS;
6976
}
6977
6978
void genX(cmd_emit_timestamp)(struct anv_batch *batch,
6979
struct anv_bo *bo,
6980
uint32_t offset) {
6981
anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
6982
pc.CommandStreamerStallEnable = true;
6983
pc.PostSyncOperation = WriteTimestamp;
6984
pc.Address = (struct anv_address) {bo, offset};
6985
anv_debug_dump_pc(pc);
6986
}
6987
}
6988
6989
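/* Illustrative sketch, not used by the driver: converting the raw timestamp
 * written by the PIPE_CONTROL above into nanoseconds, given a GPU timestamp
 * frequency in Hz supplied by the caller. Both the helper name and the
 * caller-supplied frequency are assumptions for illustration only.
 */
static inline uint64_t
anv_timestamp_ticks_to_ns_sketch(uint64_t ticks, uint64_t frequency_hz)
{
   /* ns = ticks * 1e9 / frequency; may overflow for very large tick counts. */
   return (ticks * UINT64_C(1000000000)) / frequency_hz;
}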