GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/radeonsi/si_gfx_cs.c
/*
 * Copyright 2010 Jerome Glisse <[email protected]>
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_build_pm4.h"
#include "si_pipe.h"
#include "sid.h"
#include "util/os_time.h"
#include "util/u_log.h"
#include "util/u_upload_mgr.h"
#include "ac_debug.h"

/* Make sure there is enough command buffer space (and memory budget) for the
 * given number of draws; flush the current IB if there isn't.
 */
void si_need_gfx_cs_space(struct si_context *ctx, unsigned num_draws)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;

   /* There are two memory usage counters in the winsys for all buffers
    * that have been added (cs_add_buffer) and two counters in the pipe
    * driver for those that haven't been added yet.
    */
   if (unlikely(!radeon_cs_memory_below_limit(ctx->screen, &ctx->gfx_cs, ctx->vram_kb, ctx->gtt_kb))) {
      ctx->gtt_kb = 0;
      ctx->vram_kb = 0;
      si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
      return;
   }
   ctx->gtt_kb = 0;
   ctx->vram_kb = 0;

   unsigned need_dwords = si_get_minimum_num_gfx_cs_dwords(ctx, num_draws);
   if (!ctx->ws->cs_check_space(cs, need_dwords, false))
      si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
}

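/* Flush the current graphics IB: emit end-of-IB work (streamout end, cache
 * flushes, fences, debug/trace data), submit the CS to the winsys, and start
 * a new IB via si_begin_new_gfx_cs.
 */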
void si_flush_gfx_cs(struct si_context *ctx, unsigned flags, struct pipe_fence_handle **fence)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;
   struct radeon_winsys *ws = ctx->ws;
   struct si_screen *sscreen = ctx->screen;
   const unsigned wait_ps_cs = SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH;
   unsigned wait_flags = 0;

   if (ctx->gfx_flush_in_progress)
      return;

   /* The amdgpu kernel driver synchronizes execution for shared DMABUFs between
    * processes on DRM >= 3.39.0, so we don't have to wait at the end of IBs to
    * make sure everything is idle.
    *
    * The amdgpu winsys synchronizes execution for buffers shared by different
    * contexts within the same process.
    *
    * Interop with AMDVLK, RADV, or OpenCL within the same process requires
    * explicit fences or glFinish.
    */
   if (sscreen->info.is_amdgpu && sscreen->info.drm_minor >= 39)
      flags |= RADEON_FLUSH_START_NEXT_GFX_IB_NOW;

   if (!sscreen->info.kernel_flushes_tc_l2_after_ib) {
      wait_flags |= wait_ps_cs | SI_CONTEXT_INV_L2;
   } else if (ctx->chip_class == GFX6) {
      /* The kernel flushes L2 before shaders are finished. */
      wait_flags |= wait_ps_cs;
   } else if (!(flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW) ||
              ((flags & RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION) &&
               !ws->cs_is_secure(cs))) {
      /* TODO: this workaround fixes subtitles rendering with mpv -vo=vaapi and
       * tmz but shouldn't be necessary.
       */
      wait_flags |= wait_ps_cs;
   }

   /* Drop this flush if it's a no-op. */
   if (!radeon_emitted(cs, ctx->initial_gfx_cs_size) &&
       (!wait_flags || !ctx->gfx_last_ib_is_busy) &&
       !(flags & RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION)) {
      tc_driver_internal_flush_notify(ctx->tc);
      return;
   }

   /* Non-aux contexts must set up no-op API dispatch on GPU resets. This is
    * similar to si_get_reset_status but here we can ignore soft-recoveries,
    * while si_get_reset_status can't. */
   if (!(ctx->context_flags & SI_CONTEXT_FLAG_AUX) &&
       ctx->device_reset_callback.reset) {
      enum pipe_reset_status status = ctx->ws->ctx_query_reset_status(ctx->ctx, true, NULL);
      if (status != PIPE_NO_RESET)
         ctx->device_reset_callback.reset(ctx->device_reset_callback.data, status);
   }

   if (sscreen->debug_flags & DBG(CHECK_VM))
      flags &= ~PIPE_FLUSH_ASYNC;

   ctx->gfx_flush_in_progress = true;

   if (radeon_emitted(&ctx->prim_discard_compute_cs, 0))
      si_compute_signal_gfx(ctx);

   if (ctx->has_graphics) {
      if (!list_is_empty(&ctx->active_queries))
         si_suspend_queries(ctx);

      ctx->streamout.suspended = false;
      if (ctx->streamout.begin_emitted) {
         si_emit_streamout_end(ctx);
         ctx->streamout.suspended = true;

         /* Since NGG streamout uses GDS, we need to make GDS
          * idle when we leave the IB, otherwise another process
          * might overwrite it while our shaders are busy.
          */
         if (sscreen->use_ngg_streamout)
            wait_flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
      }
   }

   /* Make sure CP DMA is idle at the end of IBs after L2 prefetches
    * because the kernel doesn't wait for it. */
   if (ctx->chip_class >= GFX7)
      si_cp_dma_wait_for_idle(ctx, &ctx->gfx_cs);

   /* Wait for draw calls to finish if needed. */
   if (wait_flags) {
      ctx->flags |= wait_flags;
      ctx->emit_cache_flush(ctx, &ctx->gfx_cs);
   }
   ctx->gfx_last_ib_is_busy = (wait_flags & wait_ps_cs) != wait_ps_cs;

   if (ctx->current_saved_cs) {
      si_trace_emit(ctx);

      /* Save the IB for debug contexts. */
      si_save_cs(ws, cs, &ctx->current_saved_cs->gfx, true);
      ctx->current_saved_cs->flushed = true;
      ctx->current_saved_cs->time_flush = os_time_get_nano();

      si_log_hw_flush(ctx);
   }

   if (si_compute_prim_discard_enabled(ctx)) {
      /* The compute IB can start after the previous gfx IB starts. */
      if (radeon_emitted(&ctx->prim_discard_compute_cs, 0) && ctx->last_gfx_fence) {
         ctx->ws->cs_add_fence_dependency(
            &ctx->gfx_cs, ctx->last_gfx_fence,
            RADEON_DEPENDENCY_PARALLEL_COMPUTE_ONLY | RADEON_DEPENDENCY_START_FENCE);
      }

      /* Remember the last execution barrier. It's in the IB.
       * It will signal the start of the next compute IB.
       */
      if (flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW && ctx->last_pkt3_write_data) {
         *ctx->last_pkt3_write_data = PKT3(PKT3_WRITE_DATA, 3, 0);
         ctx->last_pkt3_write_data = NULL;

         si_resource_reference(&ctx->last_ib_barrier_buf, ctx->barrier_buf);
         ctx->last_ib_barrier_buf_offset = ctx->barrier_buf_offset;
         si_resource_reference(&ctx->barrier_buf, NULL);

         ws->fence_reference(&ctx->last_ib_barrier_fence, NULL);
      }
   }

   if (ctx->is_noop)
      flags |= RADEON_FLUSH_NOOP;

   /* Flush the CS. */
   ws->cs_flush(cs, flags, &ctx->last_gfx_fence);

   tc_driver_internal_flush_notify(ctx->tc);
   if (fence)
      ws->fence_reference(fence, ctx->last_gfx_fence);

   ctx->num_gfx_cs_flushes++;

   if (si_compute_prim_discard_enabled(ctx)) {
      /* Remember the last execution barrier, which is the last fence
       * in this case.
       */
      if (!(flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW)) {
         ctx->last_pkt3_write_data = NULL;
         si_resource_reference(&ctx->last_ib_barrier_buf, NULL);
         ws->fence_reference(&ctx->last_ib_barrier_fence, ctx->last_gfx_fence);
      }
   }

   /* Check VM faults if needed. */
   if (sscreen->debug_flags & DBG(CHECK_VM)) {
      /* Use conservative timeout 800ms, after which we won't wait any
       * longer and assume the GPU is hung.
       */
      ctx->ws->fence_wait(ctx->ws, ctx->last_gfx_fence, 800 * 1000 * 1000);

      si_check_vm_faults(ctx, &ctx->current_saved_cs->gfx, RING_GFX);
   }

   if (unlikely(ctx->thread_trace &&
                (flags & PIPE_FLUSH_END_OF_FRAME))) {
      si_handle_thread_trace(ctx, &ctx->gfx_cs);
   }

   if (ctx->current_saved_cs)
      si_saved_cs_reference(&ctx->current_saved_cs, NULL);

   si_begin_new_gfx_cs(ctx, false);
   ctx->gfx_flush_in_progress = false;
}

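/* For debug contexts: allocate a saved-CS structure and a small trace buffer
 * so the IB contents and trace IDs can be dumped after a hang.
 */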
static void si_begin_gfx_cs_debug(struct si_context *ctx)
{
   static const uint32_t zeros[1];
   assert(!ctx->current_saved_cs);

   ctx->current_saved_cs = calloc(1, sizeof(*ctx->current_saved_cs));
   if (!ctx->current_saved_cs)
      return;

   pipe_reference_init(&ctx->current_saved_cs->reference, 1);

   ctx->current_saved_cs->trace_buf =
      si_resource(pipe_buffer_create(ctx->b.screen, 0, PIPE_USAGE_STAGING, 8));
   if (!ctx->current_saved_cs->trace_buf) {
      free(ctx->current_saved_cs);
      ctx->current_saved_cs = NULL;
      return;
   }

   pipe_buffer_write_nooverlap(&ctx->b, &ctx->current_saved_cs->trace_buf->b.b, 0, sizeof(zeros),
                               zeros);
   ctx->current_saved_cs->trace_id = 0;

   si_trace_emit(ctx);

   radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->current_saved_cs->trace_buf,
                             RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
}

static void si_add_gds_to_buffer_list(struct si_context *sctx)
{
   if (sctx->gds) {
      sctx->ws->cs_add_buffer(&sctx->gfx_cs, sctx->gds, RADEON_USAGE_READWRITE, 0, 0);
      if (sctx->gds_oa) {
         sctx->ws->cs_add_buffer(&sctx->gfx_cs, sctx->gds_oa, RADEON_USAGE_READWRITE, 0, 0);
      }
   }
}

void si_allocate_gds(struct si_context *sctx)
{
   struct radeon_winsys *ws = sctx->ws;

   if (sctx->gds)
      return;

   assert(sctx->screen->use_ngg_streamout);

   /* 4 streamout GDS counters.
    * We need 256B (64 dw) of GDS, otherwise streamout hangs.
    */
   sctx->gds = ws->buffer_create(ws, 256, 4, RADEON_DOMAIN_GDS, RADEON_FLAG_DRIVER_INTERNAL);
   sctx->gds_oa = ws->buffer_create(ws, 4, 1, RADEON_DOMAIN_OA, RADEON_FLAG_DRIVER_INTERNAL);

   assert(sctx->gds && sctx->gds_oa);
   si_add_gds_to_buffer_list(sctx);
}

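/* Initialize the tracked register values to the defaults programmed by
 * CLEAR_STATE and mark them as saved, so redundant register writes can be
 * skipped afterwards.
 */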
void si_set_tracked_regs_to_clear_state(struct si_context *ctx)
{
   STATIC_ASSERT(SI_NUM_TRACKED_REGS <= sizeof(ctx->tracked_regs.reg_saved) * 8);

   ctx->tracked_regs.reg_value[SI_TRACKED_DB_RENDER_CONTROL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_DB_COUNT_CONTROL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_DB_RENDER_OVERRIDE2] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_DB_SHADER_CONTROL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_CB_TARGET_MASK] = 0xffffffff;
   ctx->tracked_regs.reg_value[SI_TRACKED_CB_DCC_CONTROL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SX_PS_DOWNCONVERT] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SX_BLEND_OPT_EPSILON] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SX_BLEND_OPT_CONTROL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_LINE_CNTL] = 0x00001000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_AA_CONFIG] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_DB_EQAA] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_MODE_CNTL_1] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_PRIM_FILTER_CNTL] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_SMALL_PRIM_FILTER_CNTL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_VS_OUT_CNTL__VS] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_VS_OUT_CNTL__CL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_CLIP_CNTL] = 0x00090000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_BINNER_CNTL_0] = 0x00000003;
   ctx->tracked_regs.reg_value[SI_TRACKED_DB_VRS_OVERRIDE_CNTL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ] = 0x3f800000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_VERT_DISC_ADJ] = 0x3f800000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_HORZ_CLIP_ADJ] = 0x3f800000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_HORZ_DISC_ADJ] = 0x3f800000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_VTX_CNTL] = 0x00000005;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_CLIPRECT_RULE] = 0xffff;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_LINE_STIPPLE] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_ESGS_RING_ITEMSIZE] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_1] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_2] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_3] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_ITEMSIZE] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MAX_VERT_OUT] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_1] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_2] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_3] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_INSTANCE_CNT] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_ONCHIP_CNTL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MODE] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_PRIMITIVEID_EN] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_REUSE_OFF] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_VS_OUT_CONFIG] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_GE_NGG_SUBGRP_CNTL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_IDX_FORMAT] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_POS_FORMAT] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_VTE_CNTL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_NGG_CNTL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_INPUT_ENA] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_INPUT_ADDR] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_BARYC_CNTL] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_IN_CONTROL] = 0x00000002;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_Z_FORMAT] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_COL_FORMAT] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_CB_SHADER_MASK] = 0xffffffff;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_TF_PARAM] = 0x00000000;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL] = 0x0000001e; /* From GFX8 */

   /* Set all cleared context registers to saved. */
   ctx->tracked_regs.reg_saved = ~(1ull << SI_TRACKED_GE_PC_ALLOC); /* uconfig reg */
   ctx->last_gs_out_prim = 0; /* cleared by CLEAR_STATE */
}

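/* Install a draw_vbo wrapper (saving the real callback), or restore the real
 * draw_vbo when wrapper is NULL.
 */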
void si_install_draw_wrapper(struct si_context *sctx, pipe_draw_vbo_func wrapper)
{
   if (wrapper) {
      if (wrapper != sctx->b.draw_vbo) {
         assert(!sctx->real_draw_vbo);
         sctx->real_draw_vbo = sctx->b.draw_vbo;
         sctx->b.draw_vbo = wrapper;
      }
   } else if (sctx->real_draw_vbo) {
      sctx->real_draw_vbo = NULL;
      si_select_draw_vbo(sctx);
   }
}

static void si_draw_vbo_tmz_preamble(struct pipe_context *ctx,
                                     const struct pipe_draw_info *info,
                                     unsigned drawid_offset,
                                     const struct pipe_draw_indirect_info *indirect,
                                     const struct pipe_draw_start_count_bias *draws,
                                     unsigned num_draws) {
   struct si_context *sctx = (struct si_context *)ctx;

   bool secure = si_gfx_resources_check_encrypted(sctx);
   if (secure != sctx->ws->cs_is_secure(&sctx->gfx_cs)) {
      si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW |
                            RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION, NULL);
   }

   sctx->real_draw_vbo(ctx, info, drawid_offset, indirect, draws, num_draws);
}

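/* Set up a newly started graphics IB: re-add long-lived buffers to the buffer
 * list, invalidate caches, and mark states and atoms dirty so they are
 * re-emitted before the first draw.
 */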
void si_begin_new_gfx_cs(struct si_context *ctx, bool first_cs)
{
   bool is_secure = false;

   if (unlikely(radeon_uses_secure_bos(ctx->ws))) {
      /* Disable features that don't work with TMZ:
       * - primitive discard
       */
      ctx->prim_discard_vertex_count_threshold = UINT_MAX;

      is_secure = ctx->ws->cs_is_secure(&ctx->gfx_cs);

      si_install_draw_wrapper(ctx, si_draw_vbo_tmz_preamble);
   }

   if (ctx->is_debug)
      si_begin_gfx_cs_debug(ctx);

   si_add_gds_to_buffer_list(ctx);

   /* Always invalidate caches at the beginning of IBs, because external
    * users (e.g. BO evictions and SDMA/UVD/VCE IBs) can modify our
    * buffers.
    *
    * Note that the cache flush done by the kernel at the end of GFX IBs
    * isn't useful here, because that flush can finish after the following
    * IB starts drawing.
    *
    * TODO: Do we also need to invalidate CB & DB caches?
    */
   ctx->flags |= SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
                 SI_CONTEXT_INV_L2 | SI_CONTEXT_START_PIPELINE_STATS;
   ctx->pipeline_stats_enabled = -1;

   /* We don't know if the last draw call used GS fast launch, so assume it didn't. */
   if (ctx->chip_class == GFX10 && ctx->ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_ALL)
      ctx->flags |= SI_CONTEXT_VGT_FLUSH;

   if (ctx->border_color_buffer) {
      radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->border_color_buffer,
                                RADEON_USAGE_READ, RADEON_PRIO_BORDER_COLORS);
   }
   if (ctx->shadowed_regs) {
      radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->shadowed_regs,
                                RADEON_USAGE_READWRITE,
                                RADEON_PRIO_DESCRIPTORS);
   }

   si_add_all_descriptors_to_bo_list(ctx);

   if (first_cs || !ctx->shadowed_regs) {
      si_shader_pointers_mark_dirty(ctx);
      ctx->cs_shader_state.initialized = false;
   }

   if (!ctx->has_graphics) {
      ctx->initial_gfx_cs_size = ctx->gfx_cs.current.cdw;
      return;
   }

   if (ctx->tess_rings) {
      radeon_add_to_buffer_list(ctx, &ctx->gfx_cs,
                                unlikely(is_secure) ? si_resource(ctx->tess_rings_tmz) : si_resource(ctx->tess_rings),
                                RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS);
   }

   /* Set all valid groups as dirty so they get re-emitted on the
    * next draw command.
    */
   si_pm4_reset_emitted(ctx, first_cs);

   /* The CS initialization should be emitted before everything else. */
   if (ctx->cs_preamble_state)
      si_pm4_emit(ctx, ctx->cs_preamble_state);
   if (ctx->cs_preamble_tess_rings)
      si_pm4_emit(ctx, unlikely(is_secure) ? ctx->cs_preamble_tess_rings_tmz :
                                             ctx->cs_preamble_tess_rings);
   if (ctx->cs_preamble_gs_rings)
      si_pm4_emit(ctx, ctx->cs_preamble_gs_rings);

   if (ctx->queued.named.ls)
      ctx->prefetch_L2_mask |= SI_PREFETCH_LS;
   if (ctx->queued.named.hs)
      ctx->prefetch_L2_mask |= SI_PREFETCH_HS;
   if (ctx->queued.named.es)
      ctx->prefetch_L2_mask |= SI_PREFETCH_ES;
   if (ctx->queued.named.gs)
      ctx->prefetch_L2_mask |= SI_PREFETCH_GS;
   if (ctx->queued.named.vs)
      ctx->prefetch_L2_mask |= SI_PREFETCH_VS;
   if (ctx->queued.named.ps)
      ctx->prefetch_L2_mask |= SI_PREFETCH_PS;

   /* CLEAR_STATE disables all colorbuffers, so only enable bound ones. */
   bool has_clear_state = ctx->screen->info.has_clear_state;
   if (has_clear_state || ctx->shadowed_regs) {
      ctx->framebuffer.dirty_cbufs =
         u_bit_consecutive(0, ctx->framebuffer.state.nr_cbufs);
      /* CLEAR_STATE disables the zbuffer, so only enable it if it's bound. */
      ctx->framebuffer.dirty_zsbuf = ctx->framebuffer.state.zsbuf != NULL;
   } else {
      ctx->framebuffer.dirty_cbufs = u_bit_consecutive(0, 8);
      ctx->framebuffer.dirty_zsbuf = true;
   }

   /* Even with shadowed registers, we have to add buffers to the buffer list.
    * These atoms are the only ones that add buffers.
    */
   si_mark_atom_dirty(ctx, &ctx->atoms.s.framebuffer);
   si_mark_atom_dirty(ctx, &ctx->atoms.s.render_cond);
   if (ctx->screen->use_ngg_culling)
      si_mark_atom_dirty(ctx, &ctx->atoms.s.ngg_cull_state);

   if (first_cs || !ctx->shadowed_regs) {
      /* These don't add any buffers, so skip them with shadowing. */
      si_mark_atom_dirty(ctx, &ctx->atoms.s.clip_regs);
      /* CLEAR_STATE sets zeros. */
      if (!has_clear_state || ctx->clip_state_any_nonzeros)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.clip_state);
      ctx->sample_locs_num_samples = 0;
      si_mark_atom_dirty(ctx, &ctx->atoms.s.msaa_sample_locs);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.msaa_config);
      /* CLEAR_STATE sets 0xffff. */
      if (!has_clear_state || ctx->sample_mask != 0xffff)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.sample_mask);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.cb_render_state);
      /* CLEAR_STATE sets zeros. */
      if (!has_clear_state || ctx->blend_color_any_nonzeros)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.blend_color);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.db_render_state);
      if (ctx->chip_class >= GFX9)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.dpbb_state);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.stencil_ref);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.spi_map);
      if (!ctx->screen->use_ngg_streamout)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.streamout_enable);
      /* CLEAR_STATE disables all window rectangles. */
      if (!has_clear_state || ctx->num_window_rectangles > 0)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.window_rectangles);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);

      /* Invalidate various draw states so that they are emitted before
       * the first draw call. */
      si_invalidate_draw_constants(ctx);
      ctx->last_index_size = -1;
      ctx->last_primitive_restart_en = -1;
      ctx->last_restart_index = SI_RESTART_INDEX_UNKNOWN;
      ctx->last_prim = -1;
      ctx->last_multi_vgt_param = -1;
      ctx->last_vs_state = ~0;
      ctx->last_ls = NULL;
      ctx->last_tcs = NULL;
      ctx->last_tes_sh_base = -1;
      ctx->last_num_tcs_input_cp = -1;
      ctx->last_ls_hs_config = -1; /* impossible value */
      ctx->last_binning_enabled = -1;

      if (has_clear_state) {
         si_set_tracked_regs_to_clear_state(ctx);
      } else {
         /* Set all register values to unknown. */
         ctx->tracked_regs.reg_saved = 0;
         ctx->last_gs_out_prim = -1; /* unknown */
      }

      /* 0xffffffff is an impossible value for the SPI_PS_INPUT_CNTL_n registers. */
      memset(ctx->tracked_regs.spi_ps_input_cntl, 0xff, sizeof(uint32_t) * 32);
   }

   si_mark_atom_dirty(ctx, &ctx->atoms.s.scratch_state);
   if (ctx->scratch_buffer) {
      si_context_add_resource_size(ctx, &ctx->scratch_buffer->b.b);
   }

   if (ctx->streamout.suspended) {
      ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
      si_streamout_buffers_dirty(ctx);
   }

   if (!list_is_empty(&ctx->active_queries))
      si_resume_queries(ctx);

   assert(!ctx->gfx_cs.prev_dw);
   ctx->initial_gfx_cs_size = ctx->gfx_cs.current.cdw;
   ctx->prim_discard_compute_ib_initialized = false;

   /* Compute-based primitive discard:
    * The index ring is divided into 2 halves. Switch between the halves
    * in the same fashion as doublebuffering.
    */
   if (ctx->index_ring_base)
      ctx->index_ring_base = 0;
   else
      ctx->index_ring_base = ctx->index_ring_size_per_ib;

   ctx->index_ring_offset = 0;

   /* All buffer references are removed on a flush, so si_check_needs_implicit_sync
    * cannot determine if si_make_CB_shader_coherent() needs to be called.
    * ctx->force_cb_shader_coherent will be cleared by the first call to
    * si_make_CB_shader_coherent.
    */
   ctx->force_cb_shader_coherent = true;
}

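/* Write an incremented trace ID into the trace buffer and emit it as a NOP
 * trace point, so a post-hang dump can show how far the IB executed.
 */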
void si_trace_emit(struct si_context *sctx)
{
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   uint32_t trace_id = ++sctx->current_saved_cs->trace_id;

   si_cp_write_data(sctx, sctx->current_saved_cs->trace_buf, 0, 4, V_370_MEM, V_370_ME, &trace_id);

   radeon_begin(cs);
   radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
   radeon_emit(cs, AC_ENCODE_TRACE_POINT(trace_id));
   radeon_end();

   if (sctx->log)
      u_log_flush(sctx->log);
}

void si_prim_discard_signal_next_compute_ib_start(struct si_context *sctx)
{
   if (!si_compute_prim_discard_enabled(sctx))
      return;

   if (!sctx->barrier_buf) {
      u_suballocator_alloc(&sctx->allocator_zeroed_memory, 4, 4, &sctx->barrier_buf_offset,
                           (struct pipe_resource **)&sctx->barrier_buf);
   }

   /* Emit a placeholder to signal the next compute IB to start.
    * See si_compute_prim_discard.c for explanation.
    */
   uint32_t signal = 1;
   si_cp_write_data(sctx, sctx->barrier_buf, sctx->barrier_buf_offset, 4, V_370_MEM, V_370_ME,
                    &signal);

   sctx->last_pkt3_write_data = &sctx->gfx_cs.current.buf[sctx->gfx_cs.current.cdw - 5];

   /* Only the last occurrence of WRITE_DATA will be executed.
    * The packet will be enabled in si_flush_gfx_cs.
    */
   *sctx->last_pkt3_write_data = PKT3(PKT3_NOP, 3, 0);
}

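/* Emit SURFACE_SYNC (GFX6-GFX8) or ACQUIRE_MEM (GFX9 and compute IBs) with the
 * given CP_COHER_CNTL bits and wait for the affected caches to go idle.
 */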
void si_emit_surface_sync(struct si_context *sctx, struct radeon_cmdbuf *cs, unsigned cp_coher_cntl)
{
   bool compute_ib = !sctx->has_graphics || cs == &sctx->prim_discard_compute_cs;

   assert(sctx->chip_class <= GFX9);

   /* This seems problematic with GFX7 (see #4764) */
   if (sctx->chip_class != GFX7)
      cp_coher_cntl |= 1u << 31; /* don't sync PFP, i.e. execute the sync in ME */

   radeon_begin(cs);

   if (sctx->chip_class == GFX9 || compute_ib) {
      /* Flush caches and wait for the caches to assert idle. */
      radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
      radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
      radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
      radeon_emit(cs, 0xffffff);      /* CP_COHER_SIZE_HI */
      radeon_emit(cs, 0);             /* CP_COHER_BASE */
      radeon_emit(cs, 0);             /* CP_COHER_BASE_HI */
      radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
   } else {
      /* ACQUIRE_MEM is only required on a compute ring. */
      radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
      radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
      radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
      radeon_emit(cs, 0);             /* CP_COHER_BASE */
      radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
   }
   radeon_end();

   /* ACQUIRE_MEM has an implicit context roll if the current context
    * is busy. */
   if (!compute_ib)
      sctx->context_roll = true;
}

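/* Emit the cache flushes and synchronization requested in ctx->flags
 * (GFX10 path, using GCR_CNTL). */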
void gfx10_emit_cache_flush(struct si_context *ctx, struct radeon_cmdbuf *cs)
{
   uint32_t gcr_cntl = 0;
   unsigned cb_db_event = 0;
   unsigned flags = ctx->flags;

   if (!ctx->has_graphics) {
      /* Only process compute flags. */
      flags &= SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
               SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2 | SI_CONTEXT_INV_L2_METADATA |
               SI_CONTEXT_CS_PARTIAL_FLUSH;
   }

   /* We don't need these. */
   assert(!(flags & (SI_CONTEXT_VGT_STREAMOUT_SYNC | SI_CONTEXT_FLUSH_AND_INV_DB_META)));

   radeon_begin(cs);

   if (flags & SI_CONTEXT_VGT_FLUSH) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
   }

   if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
      ctx->num_cb_cache_flushes++;
   if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
      ctx->num_db_cache_flushes++;

   if (flags & SI_CONTEXT_INV_ICACHE)
      gcr_cntl |= S_586_GLI_INV(V_586_GLI_ALL);
   if (flags & SI_CONTEXT_INV_SCACHE) {
      /* TODO: When writing to the SMEM L1 cache, we need to set SEQ
       * to FORWARD when both L1 and L2 are written out (WB or INV).
       */
      gcr_cntl |= S_586_GL1_INV(1) | S_586_GLK_INV(1);
   }
   if (flags & SI_CONTEXT_INV_VCACHE)
      gcr_cntl |= S_586_GL1_INV(1) | S_586_GLV_INV(1);

   /* The L2 cache ops are:
    * - INV:      - invalidate lines that reflect memory (were loaded from memory)
    *             - don't touch lines that were overwritten (were stored by gfx clients)
    * - WB:       - don't touch lines that reflect memory
    *             - write back lines that were overwritten
    * - WB | INV: - invalidate lines that reflect memory
    *             - write back lines that were overwritten
    *
    * GLM doesn't support WB alone. If WB is set, INV must be set too.
    */
   if (flags & SI_CONTEXT_INV_L2) {
      /* Writeback and invalidate everything in L2. */
      gcr_cntl |= S_586_GL2_INV(1) | S_586_GL2_WB(1) | S_586_GLM_INV(1) | S_586_GLM_WB(1);
      ctx->num_L2_invalidates++;
   } else if (flags & SI_CONTEXT_WB_L2) {
      gcr_cntl |= S_586_GL2_WB(1) | S_586_GLM_WB(1) | S_586_GLM_INV(1);
   } else if (flags & SI_CONTEXT_INV_L2_METADATA) {
      gcr_cntl |= S_586_GLM_INV(1) | S_586_GLM_WB(1);
   }

   if (flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) {
      if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
         /* Flush CMASK/FMASK/DCC. Will wait for idle later. */
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
      }
      if (flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
         /* Flush HTILE. Will wait for idle later. */
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
      }

      /* First flush CB/DB, then L1/L2. */
      gcr_cntl |= S_586_SEQ(V_586_SEQ_FORWARD);

      if ((flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) ==
          (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) {
         cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
      } else if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
         cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
      } else if (flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
         cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
      } else {
         assert(0);
      }
   } else {
      /* Wait for graphics shaders to go idle if requested. */
      if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
         /* Only count explicit shader flushes, not implicit ones. */
         ctx->num_vs_flushes++;
         ctx->num_ps_flushes++;
      } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
         ctx->num_vs_flushes++;
      }
   }

   if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH && ctx->compute_is_busy) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH | EVENT_INDEX(4)));
      ctx->num_cs_flushes++;
      ctx->compute_is_busy = false;
   }
   radeon_end();

   if (cb_db_event) {
      struct si_resource* wait_mem_scratch = unlikely(ctx->ws->cs_is_secure(cs)) ?
         ctx->wait_mem_scratch_tmz : ctx->wait_mem_scratch;
      /* CB/DB flush and invalidate (or possibly just a wait for a
       * meta flush) via RELEASE_MEM.
       *
       * Combine this with other cache flushes when possible; this
       * requires affected shaders to be idle, so do it after the
       * CS_PARTIAL_FLUSH before (VS/PS partial flushes are always
       * implied).
       */
      uint64_t va;

      /* Do the flush (enqueue the event and wait for it). */
      va = wait_mem_scratch->gpu_address;
      ctx->wait_mem_number++;

      /* Get GCR_CNTL fields, because the encoding is different in RELEASE_MEM. */
      unsigned glm_wb = G_586_GLM_WB(gcr_cntl);
      unsigned glm_inv = G_586_GLM_INV(gcr_cntl);
      unsigned glv_inv = G_586_GLV_INV(gcr_cntl);
      unsigned gl1_inv = G_586_GL1_INV(gcr_cntl);
      assert(G_586_GL2_US(gcr_cntl) == 0);
      assert(G_586_GL2_RANGE(gcr_cntl) == 0);
      assert(G_586_GL2_DISCARD(gcr_cntl) == 0);
      unsigned gl2_inv = G_586_GL2_INV(gcr_cntl);
      unsigned gl2_wb = G_586_GL2_WB(gcr_cntl);
      unsigned gcr_seq = G_586_SEQ(gcr_cntl);

      gcr_cntl &= C_586_GLM_WB & C_586_GLM_INV & C_586_GLV_INV & C_586_GL1_INV & C_586_GL2_INV &
                  C_586_GL2_WB; /* keep SEQ */

      si_cp_release_mem(ctx, cs, cb_db_event,
                        S_490_GLM_WB(glm_wb) | S_490_GLM_INV(glm_inv) | S_490_GLV_INV(glv_inv) |
                           S_490_GL1_INV(gl1_inv) | S_490_GL2_INV(gl2_inv) | S_490_GL2_WB(gl2_wb) |
                           S_490_SEQ(gcr_seq),
                        EOP_DST_SEL_MEM, EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
                        EOP_DATA_SEL_VALUE_32BIT, wait_mem_scratch, va, ctx->wait_mem_number,
                        SI_NOT_QUERY);

      if (unlikely(ctx->thread_trace_enabled)) {
         si_sqtt_describe_barrier_start(ctx, &ctx->gfx_cs);
      }

      si_cp_wait_mem(ctx, cs, va, ctx->wait_mem_number, 0xffffffff, WAIT_REG_MEM_EQUAL);

      if (unlikely(ctx->thread_trace_enabled)) {
         si_sqtt_describe_barrier_end(ctx, &ctx->gfx_cs, flags);
      }
   }

   radeon_begin_again(cs);

   /* Ignore fields that only modify the behavior of other fields. */
   if (gcr_cntl & C_586_GL1_RANGE & C_586_GL2_RANGE & C_586_SEQ) {
      unsigned dont_sync_pfp = (!(flags & SI_CONTEXT_PFP_SYNC_ME)) << 31;

      /* Flush caches and wait for the caches to assert idle.
       * The cache flush is executed in the ME, but the PFP waits
       * for completion.
       */
      radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
      radeon_emit(cs, dont_sync_pfp); /* CP_COHER_CNTL */
      radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
      radeon_emit(cs, 0xffffff);      /* CP_COHER_SIZE_HI */
      radeon_emit(cs, 0);             /* CP_COHER_BASE */
      radeon_emit(cs, 0);             /* CP_COHER_BASE_HI */
      radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
      radeon_emit(cs, gcr_cntl);      /* GCR_CNTL */
   } else if (flags & SI_CONTEXT_PFP_SYNC_ME) {
      /* Synchronize PFP with ME. (this stalls PFP) */
      radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
      radeon_emit(cs, 0);
   }

   if (flags & SI_CONTEXT_START_PIPELINE_STATS && ctx->pipeline_stats_enabled != 1) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) | EVENT_INDEX(0));
      ctx->pipeline_stats_enabled = 1;
   } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS && ctx->pipeline_stats_enabled != 0) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) | EVENT_INDEX(0));
      ctx->pipeline_stats_enabled = 0;
   }
   radeon_end();

   ctx->flags = 0;
}

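/* Emit the cache flushes and synchronization requested in sctx->flags
 * (GFX6-GFX9 path, using CP_COHER_CNTL / SURFACE_SYNC). */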
void si_emit_cache_flush(struct si_context *sctx, struct radeon_cmdbuf *cs)
{
   uint32_t flags = sctx->flags;

   if (!sctx->has_graphics) {
      /* Only process compute flags. */
      flags &= SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
               SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2 | SI_CONTEXT_INV_L2_METADATA |
               SI_CONTEXT_CS_PARTIAL_FLUSH;
   }

   uint32_t cp_coher_cntl = 0;
   const uint32_t flush_cb_db = flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB);
   const bool is_barrier =
      flush_cb_db ||
      /* INV_ICACHE == beginning of gfx IB. Checking
       * INV_ICACHE fixes corruption for DeusExMD with
       * compute-based culling, but I don't know why.
       */
      flags & (SI_CONTEXT_INV_ICACHE | SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_VS_PARTIAL_FLUSH) ||
      (flags & SI_CONTEXT_CS_PARTIAL_FLUSH && sctx->compute_is_busy);

   assert(sctx->chip_class <= GFX9);

   if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
      sctx->num_cb_cache_flushes++;
   if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
      sctx->num_db_cache_flushes++;

   /* GFX6 has a bug that it always flushes ICACHE and KCACHE if either
    * bit is set. An alternative way is to write SQC_CACHES, but that
    * doesn't seem to work reliably. Since the bug doesn't affect
    * correctness (it only does more work than necessary) and
    * the performance impact is likely negligible, there is no plan
    * to add a workaround for it.
    */

   if (flags & SI_CONTEXT_INV_ICACHE)
      cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
   if (flags & SI_CONTEXT_INV_SCACHE)
      cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

   if (sctx->chip_class <= GFX8) {
      if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
         cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) | S_0085F0_CB0_DEST_BASE_ENA(1) |
                          S_0085F0_CB1_DEST_BASE_ENA(1) | S_0085F0_CB2_DEST_BASE_ENA(1) |
                          S_0085F0_CB3_DEST_BASE_ENA(1) | S_0085F0_CB4_DEST_BASE_ENA(1) |
                          S_0085F0_CB5_DEST_BASE_ENA(1) | S_0085F0_CB6_DEST_BASE_ENA(1) |
                          S_0085F0_CB7_DEST_BASE_ENA(1);

         /* Necessary for DCC */
         if (sctx->chip_class == GFX8)
            si_cp_release_mem(sctx, cs, V_028A90_FLUSH_AND_INV_CB_DATA_TS, 0, EOP_DST_SEL_MEM,
                              EOP_INT_SEL_NONE, EOP_DATA_SEL_DISCARD, NULL, 0, 0, SI_NOT_QUERY);
      }
      if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
         cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) | S_0085F0_DB_DEST_BASE_ENA(1);
   }

   radeon_begin(cs);

   if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
      /* Flush CMASK/FMASK/DCC. SURFACE_SYNC will wait for idle. */
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
   }
   if (flags & (SI_CONTEXT_FLUSH_AND_INV_DB | SI_CONTEXT_FLUSH_AND_INV_DB_META)) {
      /* Flush HTILE. SURFACE_SYNC will wait for idle. */
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
   }

   /* Wait for shader engines to go idle.
    * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
    * for everything including CB/DB cache flushes.
    */
   if (!flush_cb_db) {
      if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
         /* Only count explicit shader flushes, not implicit ones
          * done by SURFACE_SYNC.
          */
         sctx->num_vs_flushes++;
         sctx->num_ps_flushes++;
      } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
         sctx->num_vs_flushes++;
      }
   }

   if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH && sctx->compute_is_busy) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
      sctx->num_cs_flushes++;
      sctx->compute_is_busy = false;
   }

   /* VGT state synchronization. */
   if (flags & SI_CONTEXT_VGT_FLUSH) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
   }
   if (flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
   }

   radeon_end();

   /* GFX9: Wait for idle if we're flushing CB or DB. ACQUIRE_MEM doesn't
    * wait for idle on GFX9. We have to use a TS event.
    */
   if (sctx->chip_class == GFX9 && flush_cb_db) {
      uint64_t va;
      unsigned tc_flags, cb_db_event;

      /* Set the CB/DB flush event. */
      switch (flush_cb_db) {
      case SI_CONTEXT_FLUSH_AND_INV_CB:
         cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
         break;
      case SI_CONTEXT_FLUSH_AND_INV_DB:
         cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
         break;
      default:
         /* both CB & DB */
         cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
      }

      /* These are the only allowed combinations. If you need to
       * do multiple operations at once, do them separately.
       * All operations that invalidate L2 also seem to invalidate
       * metadata. Volatile (VOL) and WC flushes are not listed here.
       *
       * TC | TC_WB = writeback & invalidate L2 & L1
       * TC | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
       * TC_WB | TC_NC = writeback L2 for MTYPE == NC
       * TC | TC_NC = invalidate L2 for MTYPE == NC
       * TC | TC_MD = writeback & invalidate L2 metadata (DCC, etc.)
       * TCL1 = invalidate L1
       */
      tc_flags = 0;

      if (flags & SI_CONTEXT_INV_L2_METADATA) {
         tc_flags = EVENT_TC_ACTION_ENA | EVENT_TC_MD_ACTION_ENA;
      }

      /* Ideally flush TC together with CB/DB. */
      if (flags & SI_CONTEXT_INV_L2) {
         /* Writeback and invalidate everything in L2 & L1. */
         tc_flags = EVENT_TC_ACTION_ENA | EVENT_TC_WB_ACTION_ENA;

         /* Clear the flags. */
         flags &= ~(SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2 | SI_CONTEXT_INV_VCACHE);
         sctx->num_L2_invalidates++;
      }

      /* Do the flush (enqueue the event and wait for it). */
      struct si_resource* wait_mem_scratch = unlikely(sctx->ws->cs_is_secure(cs)) ?
         sctx->wait_mem_scratch_tmz : sctx->wait_mem_scratch;
      va = wait_mem_scratch->gpu_address;
      sctx->wait_mem_number++;

      si_cp_release_mem(sctx, cs, cb_db_event, tc_flags, EOP_DST_SEL_MEM,
                        EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM, EOP_DATA_SEL_VALUE_32BIT,
                        wait_mem_scratch, va, sctx->wait_mem_number, SI_NOT_QUERY);

      if (unlikely(sctx->thread_trace_enabled)) {
         si_sqtt_describe_barrier_start(sctx, &sctx->gfx_cs);
      }

      si_cp_wait_mem(sctx, cs, va, sctx->wait_mem_number, 0xffffffff, WAIT_REG_MEM_EQUAL);

      if (unlikely(sctx->thread_trace_enabled)) {
         si_sqtt_describe_barrier_end(sctx, &sctx->gfx_cs, sctx->flags);
      }
   }

   /* GFX6-GFX8 only:
    * When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
    * waits for idle, so it should be last. SURFACE_SYNC is done in PFP.
    *
    * cp_coher_cntl should contain all necessary flags except TC and PFP flags
    * at this point.
    *
    * GFX6-GFX7 don't support L2 write-back.
    */
   if (flags & SI_CONTEXT_INV_L2 || (sctx->chip_class <= GFX7 && (flags & SI_CONTEXT_WB_L2))) {
      /* Invalidate L1 & L2. (L1 is always invalidated on GFX6)
       * WB must be set on GFX8+ when TC_ACTION is set.
       */
      si_emit_surface_sync(sctx, cs,
                           cp_coher_cntl | S_0085F0_TC_ACTION_ENA(1) | S_0085F0_TCL1_ACTION_ENA(1) |
                              S_0301F0_TC_WB_ACTION_ENA(sctx->chip_class >= GFX8));
      cp_coher_cntl = 0;
      sctx->num_L2_invalidates++;
   } else {
      /* L1 invalidation and L2 writeback must be done separately,
       * because both operations can't be done together.
       */
      if (flags & SI_CONTEXT_WB_L2) {
         /* WB = write-back
          * NC = apply to non-coherent MTYPEs
          *      (i.e. MTYPE <= 1, which is what we use everywhere)
          *
          * WB doesn't work without NC.
          */
         si_emit_surface_sync(
            sctx, cs,
            cp_coher_cntl | S_0301F0_TC_WB_ACTION_ENA(1) | S_0301F0_TC_NC_ACTION_ENA(1));
         cp_coher_cntl = 0;
         sctx->num_L2_writebacks++;
      }
      if (flags & SI_CONTEXT_INV_VCACHE) {
         /* Invalidate per-CU VMEM L1. */
         si_emit_surface_sync(sctx, cs, cp_coher_cntl | S_0085F0_TCL1_ACTION_ENA(1));
         cp_coher_cntl = 0;
      }
   }

   /* If TC flushes haven't cleared this... */
   if (cp_coher_cntl)
      si_emit_surface_sync(sctx, cs, cp_coher_cntl);

   if (flags & SI_CONTEXT_PFP_SYNC_ME) {
      radeon_begin(cs);
      radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
      radeon_emit(cs, 0);
      radeon_end();
   }

   if (is_barrier)
      si_prim_discard_signal_next_compute_ib_start(sctx);

   if (flags & SI_CONTEXT_START_PIPELINE_STATS && sctx->pipeline_stats_enabled != 1) {
      radeon_begin(cs);
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) | EVENT_INDEX(0));
      radeon_end();
      sctx->pipeline_stats_enabled = 1;
   } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS && sctx->pipeline_stats_enabled != 0) {
      radeon_begin(cs);
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) | EVENT_INDEX(0));
      radeon_end();
      sctx->pipeline_stats_enabled = 0;
   }

   sctx->flags = 0;
}