Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/amd/vulkan/radv_sqtt.c
7354 views
1
/*
2
* Copyright © 2020 Valve Corporation
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
* IN THE SOFTWARE.
22
*/
23
24
#include <inttypes.h>
25
26
#include "radv_cs.h"
27
#include "radv_private.h"
28
#include "sid.h"
29
30
#define SQTT_BUFFER_ALIGN_SHIFT 12
31
32
static bool
33
radv_se_is_disabled(struct radv_device *device, unsigned se)
34
{
35
/* No active CU on the SE means it is disabled. */
36
return device->physical_device->rad_info.cu_mask[se][0] == 0;
37
}
38
39
/* Program and arm SQ Thread Trace (SQTT) on every enabled shader engine,
 * then start the trace with the event/register that matches the queue.
 *
 * NOTE(review): several register sequences below are order-sensitive per the
 * inline comments — do not reorder the writes.
 */
static void
radv_emit_thread_trace_start(struct radv_device *device, struct radeon_cmdbuf *cs,
                             uint32_t queue_family_index)
{
   /* Size and VA are programmed in units of (1 << SQTT_BUFFER_ALIGN_SHIFT) bytes. */
   uint32_t shifted_size = device->thread_trace.buffer_size >> SQTT_BUFFER_ALIGN_SHIFT;
   struct radeon_info *rad_info = &device->physical_device->rad_info;
   unsigned max_se = rad_info->max_se;

   assert(device->physical_device->rad_info.chip_class >= GFX8);

   for (unsigned se = 0; se < max_se; se++) {
      uint64_t va = radv_buffer_get_va(device->thread_trace.bo);
      uint64_t data_va = ac_thread_trace_get_data_va(rad_info, &device->thread_trace, va, se);
      uint64_t shifted_va = data_va >> SQTT_BUFFER_ALIGN_SHIFT;
      /* ffs() is 1-based; a result of 0 only happens when the SE is disabled,
       * which is skipped just below. */
      int first_active_cu = ffs(device->physical_device->rad_info.cu_mask[se][0]);

      if (radv_se_is_disabled(device, se))
         continue;

      /* Target SEx and SH0. */
      radeon_set_uconfig_reg(
         cs, R_030800_GRBM_GFX_INDEX,
         S_030800_SE_INDEX(se) | S_030800_SH_INDEX(0) | S_030800_INSTANCE_BROADCAST_WRITES(1));

      if (device->physical_device->rad_info.chip_class >= GFX10) {
         /* Order seems important for the following 2 registers. */
         radeon_set_privileged_config_reg(
            cs, R_008D04_SQ_THREAD_TRACE_BUF0_SIZE,
            S_008D04_SIZE(shifted_size) | S_008D04_BASE_HI(shifted_va >> 32));

         radeon_set_privileged_config_reg(cs, R_008D00_SQ_THREAD_TRACE_BUF0_BASE, shifted_va);

         /* WGP_SEL takes first_active_cu / 2 — presumably because GFX10 pairs
          * CUs into WGPs; see the matching /2 in radv_get_thread_trace(). */
         radeon_set_privileged_config_reg(
            cs, R_008D14_SQ_THREAD_TRACE_MASK,
            S_008D14_WTYPE_INCLUDE(0x7f) | /* all shader stages */
               S_008D14_SA_SEL(0) | S_008D14_WGP_SEL(first_active_cu / 2) | S_008D14_SIMD_SEL(0));

         uint32_t thread_trace_token_mask = S_008D18_REG_INCLUDE(
            V_008D18_REG_INCLUDE_SQDEC | V_008D18_REG_INCLUDE_SHDEC | V_008D18_REG_INCLUDE_GFXUDEC |
            V_008D18_REG_INCLUDE_COMP | V_008D18_REG_INCLUDE_CONTEXT | V_008D18_REG_INCLUDE_CONFIG);

         /* Performance counters with SQTT are considered
          * deprecated.
          */
         thread_trace_token_mask |= S_008D18_TOKEN_EXCLUDE(V_008D18_TOKEN_EXCLUDE_PERF);

         radeon_set_privileged_config_reg(cs, R_008D18_SQ_THREAD_TRACE_TOKEN_MASK,
                                          thread_trace_token_mask);

         uint32_t thread_trace_ctrl = S_008D1C_MODE(1) | S_008D1C_HIWATER(5) |
                                      S_008D1C_UTIL_TIMER(1) | S_008D1C_RT_FREQ(2) | /* 4096 clk */
                                      S_008D1C_DRAW_EVENT_EN(1) | S_008D1C_REG_STALL_EN(1) |
                                      S_008D1C_SPI_STALL_EN(1) | S_008D1C_SQ_STALL_EN(1) |
                                      S_008D1C_REG_DROP_ON_STALL(0);

         if (device->physical_device->rad_info.chip_class == GFX10_3)
            thread_trace_ctrl |= S_008D1C_LOWATER_OFFSET(4);

         /* Should be emitted last (it enables thread traces). */
         radeon_set_privileged_config_reg(cs, R_008D1C_SQ_THREAD_TRACE_CTRL, thread_trace_ctrl);
      } else {
         /* GFX8-GFX9 path: programmed through uconfig registers. */
         /* Order seems important for the following 4 registers. */
         radeon_set_uconfig_reg(cs, R_030CDC_SQ_THREAD_TRACE_BASE2,
                                S_030CDC_ADDR_HI(shifted_va >> 32));

         radeon_set_uconfig_reg(cs, R_030CC0_SQ_THREAD_TRACE_BASE, shifted_va);

         radeon_set_uconfig_reg(cs, R_030CC4_SQ_THREAD_TRACE_SIZE, S_030CC4_SIZE(shifted_size));

         radeon_set_uconfig_reg(cs, R_030CD4_SQ_THREAD_TRACE_CTRL, S_030CD4_RESET_BUFFER(1));

         uint32_t thread_trace_mask = S_030CC8_CU_SEL(first_active_cu) | S_030CC8_SH_SEL(0) |
                                      S_030CC8_SIMD_EN(0xf) | S_030CC8_VM_ID_MASK(0) |
                                      S_030CC8_REG_STALL_EN(1) | S_030CC8_SPI_STALL_EN(1) |
                                      S_030CC8_SQ_STALL_EN(1);

         if (device->physical_device->rad_info.chip_class < GFX9) {
            thread_trace_mask |= S_030CC8_RANDOM_SEED(0xffff);
         }

         radeon_set_uconfig_reg(cs, R_030CC8_SQ_THREAD_TRACE_MASK, thread_trace_mask);

         /* Trace all tokens and registers. */
         radeon_set_uconfig_reg(
            cs, R_030CCC_SQ_THREAD_TRACE_TOKEN_MASK,
            S_030CCC_TOKEN_MASK(0xbfff) | S_030CCC_REG_MASK(0xff) | S_030CCC_REG_DROP_ON_STALL(0));

         /* Enable SQTT perf counters for all CUs. */
         radeon_set_uconfig_reg(cs, R_030CD0_SQ_THREAD_TRACE_PERF_MASK,
                                S_030CD0_SH0_MASK(0xffff) | S_030CD0_SH1_MASK(0xffff));

         radeon_set_uconfig_reg(cs, R_030CE0_SQ_THREAD_TRACE_TOKEN_MASK2, 0xffffffff);

         radeon_set_uconfig_reg(cs, R_030CEC_SQ_THREAD_TRACE_HIWATER, S_030CEC_HIWATER(4));

         if (device->physical_device->rad_info.chip_class == GFX9) {
            /* Reset thread trace status errors. */
            radeon_set_uconfig_reg(cs, R_030CE8_SQ_THREAD_TRACE_STATUS, S_030CE8_UTC_ERROR(0));
         }

         /* Enable the thread trace mode. */
         uint32_t thread_trace_mode =
            S_030CD8_MASK_PS(1) | S_030CD8_MASK_VS(1) | S_030CD8_MASK_GS(1) | S_030CD8_MASK_ES(1) |
            S_030CD8_MASK_HS(1) | S_030CD8_MASK_LS(1) | S_030CD8_MASK_CS(1) |
            S_030CD8_AUTOFLUSH_EN(1) | /* periodically flush SQTT data to memory */
            S_030CD8_MODE(1);

         if (device->physical_device->rad_info.chip_class == GFX9) {
            /* Count SQTT traffic in TCC perf counters. */
            thread_trace_mode |= S_030CD8_TC_PERF_EN(1);
         }

         radeon_set_uconfig_reg(cs, R_030CD8_SQ_THREAD_TRACE_MODE, thread_trace_mode);
      }
   }

   /* Restore global broadcasting. */
   radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
                          S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
                             S_030800_INSTANCE_BROADCAST_WRITES(1));

   /* Start the thread trace with a different event based on the queue. */
   if (queue_family_index == RADV_QUEUE_COMPUTE &&
       device->physical_device->rad_info.chip_class >= GFX7) {
      radeon_set_sh_reg(cs, R_00B878_COMPUTE_THREAD_TRACE_ENABLE, S_00B878_THREAD_TRACE_ENABLE(1));
   } else {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_THREAD_TRACE_START) | EVENT_INDEX(0));
   }
}
169
170
/* Per-generation SQTT info registers copied back after a trace stops:
 * write pointer, status, and a counter register. Each table must keep the
 * same length and field order, since radv_copy_thread_trace_info_regs()
 * copies them DWORD-by-DWORD into the per-SE ac_thread_trace_info struct.
 */
static const uint32_t gfx8_thread_trace_info_regs[] = {
   R_030CE4_SQ_THREAD_TRACE_WPTR,
   R_030CE8_SQ_THREAD_TRACE_STATUS,
   R_008E40_SQ_THREAD_TRACE_CNTR,
};

static const uint32_t gfx9_thread_trace_info_regs[] = {
   R_030CE4_SQ_THREAD_TRACE_WPTR,
   R_030CE8_SQ_THREAD_TRACE_STATUS,
   R_030CF0_SQ_THREAD_TRACE_CNTR,
};

static const uint32_t gfx10_thread_trace_info_regs[] = {
   R_008D10_SQ_THREAD_TRACE_WPTR,
   R_008D20_SQ_THREAD_TRACE_STATUS,
   R_008D24_SQ_THREAD_TRACE_DROPPED_CNTR,
};
187
188
static void
189
radv_copy_thread_trace_info_regs(struct radv_device *device, struct radeon_cmdbuf *cs,
190
unsigned se_index)
191
{
192
const uint32_t *thread_trace_info_regs = NULL;
193
194
if (device->physical_device->rad_info.chip_class >= GFX10) {
195
thread_trace_info_regs = gfx10_thread_trace_info_regs;
196
} else if (device->physical_device->rad_info.chip_class == GFX9) {
197
thread_trace_info_regs = gfx9_thread_trace_info_regs;
198
} else {
199
assert(device->physical_device->rad_info.chip_class == GFX8);
200
thread_trace_info_regs = gfx8_thread_trace_info_regs;
201
}
202
203
/* Get the VA where the info struct is stored for this SE. */
204
uint64_t va = radv_buffer_get_va(device->thread_trace.bo);
205
uint64_t info_va = ac_thread_trace_get_info_va(va, se_index);
206
207
/* Copy back the info struct one DWORD at a time. */
208
for (unsigned i = 0; i < 3; i++) {
209
radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
210
radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_PERF) | COPY_DATA_DST_SEL(COPY_DATA_TC_L2) |
211
COPY_DATA_WR_CONFIRM);
212
radeon_emit(cs, thread_trace_info_regs[i] >> 2);
213
radeon_emit(cs, 0); /* unused */
214
radeon_emit(cs, (info_va + i * 4));
215
radeon_emit(cs, (info_va + i * 4) >> 32);
216
}
217
}
218
219
/* Stop SQTT on every enabled shader engine, wait for the hardware to finish
 * writing trace data, and copy the per-SE info registers back to the trace
 * BO so the CPU can read the results.
 */
static void
radv_emit_thread_trace_stop(struct radv_device *device, struct radeon_cmdbuf *cs,
                            uint32_t queue_family_index)
{
   unsigned max_se = device->physical_device->rad_info.max_se;

   assert(device->physical_device->rad_info.chip_class >= GFX8);

   /* Stop the thread trace with a different event based on the queue. */
   if (queue_family_index == RADV_QUEUE_COMPUTE &&
       device->physical_device->rad_info.chip_class >= GFX7) {
      radeon_set_sh_reg(cs, R_00B878_COMPUTE_THREAD_TRACE_ENABLE, S_00B878_THREAD_TRACE_ENABLE(0));
   } else {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_THREAD_TRACE_STOP) | EVENT_INDEX(0));
   }

   /* THREAD_TRACE_FINISH event; the GFX10 path below waits on FINISH_DONE. */
   radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
   radeon_emit(cs, EVENT_TYPE(V_028A90_THREAD_TRACE_FINISH) | EVENT_INDEX(0));

   for (unsigned se = 0; se < max_se; se++) {
      if (radv_se_is_disabled(device, se))
         continue;

      /* Target SEi and SH0. */
      radeon_set_uconfig_reg(
         cs, R_030800_GRBM_GFX_INDEX,
         S_030800_SE_INDEX(se) | S_030800_SH_INDEX(0) | S_030800_INSTANCE_BROADCAST_WRITES(1));

      if (device->physical_device->rad_info.chip_class >= GFX10) {
         /* Make sure to wait for the trace buffer. */
         radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
         radeon_emit(
            cs,
            WAIT_REG_MEM_NOT_EQUAL); /* wait until the masked register differs from the reference */
         radeon_emit(cs, R_008D20_SQ_THREAD_TRACE_STATUS >> 2); /* register */
         radeon_emit(cs, 0);
         radeon_emit(cs, 0); /* reference value */
         radeon_emit(cs, S_008D20_FINISH_DONE(1)); /* mask: i.e. wait for FINISH_DONE != 0 */
         radeon_emit(cs, 4); /* poll interval */

         /* Disable the thread trace mode. */
         radeon_set_privileged_config_reg(cs, R_008D1C_SQ_THREAD_TRACE_CTRL, S_008D1C_MODE(0));

         /* Wait for thread trace completion. */
         radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
         radeon_emit(
            cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
         radeon_emit(cs, R_008D20_SQ_THREAD_TRACE_STATUS >> 2); /* register */
         radeon_emit(cs, 0);
         radeon_emit(cs, 0); /* reference value */
         radeon_emit(cs, S_008D20_BUSY(1)); /* mask: i.e. wait for BUSY == 0 */
         radeon_emit(cs, 4); /* poll interval */
      } else {
         /* Disable the thread trace mode. */
         radeon_set_uconfig_reg(cs, R_030CD8_SQ_THREAD_TRACE_MODE, S_030CD8_MODE(0));

         /* Wait for thread trace completion. */
         radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
         radeon_emit(
            cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
         radeon_emit(cs, R_030CE8_SQ_THREAD_TRACE_STATUS >> 2); /* register */
         radeon_emit(cs, 0);
         radeon_emit(cs, 0); /* reference value */
         radeon_emit(cs, S_030CE8_BUSY(1)); /* mask: i.e. wait for BUSY == 0 */
         radeon_emit(cs, 4); /* poll interval */
      }

      /* Copy WPTR/STATUS/CNTR for this SE back to the info struct. */
      radv_copy_thread_trace_info_regs(device, cs, se);
   }

   /* Restore global broadcasting. */
   radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
                          S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
                             S_030800_INSTANCE_BROADCAST_WRITES(1));
}
295
296
void
297
radv_emit_thread_trace_userdata(const struct radv_device *device, struct radeon_cmdbuf *cs,
298
const void *data, uint32_t num_dwords)
299
{
300
const uint32_t *dwords = (uint32_t *)data;
301
302
while (num_dwords > 0) {
303
uint32_t count = MIN2(num_dwords, 2);
304
305
radeon_check_space(device->ws, cs, 2 + count);
306
307
/* Without the perfctr bit the CP might not always pass the
308
* write on correctly. */
309
if (device->physical_device->rad_info.chip_class >= GFX10)
310
radeon_set_uconfig_reg_seq_perfctr(cs, R_030D08_SQ_THREAD_TRACE_USERDATA_2, count);
311
else
312
radeon_set_uconfig_reg_seq(cs, R_030D08_SQ_THREAD_TRACE_USERDATA_2, count);
313
radeon_emit_array(cs, dwords, count);
314
315
dwords += count;
316
num_dwords -= count;
317
}
318
}
319
320
static void
321
radv_emit_spi_config_cntl(struct radv_device *device, struct radeon_cmdbuf *cs, bool enable)
322
{
323
if (device->physical_device->rad_info.chip_class >= GFX9) {
324
uint32_t spi_config_cntl =
325
S_031100_GPR_WRITE_PRIORITY(0x2c688) | S_031100_EXP_PRIORITY_ORDER(3) |
326
S_031100_ENABLE_SQG_TOP_EVENTS(enable) | S_031100_ENABLE_SQG_BOP_EVENTS(enable);
327
328
if (device->physical_device->rad_info.chip_class >= GFX10)
329
spi_config_cntl |= S_031100_PS_PKR_PRIORITY_CNTL(3);
330
331
radeon_set_uconfig_reg(cs, R_031100_SPI_CONFIG_CNTL, spi_config_cntl);
332
} else {
333
/* SPI_CONFIG_CNTL is a protected register on GFX6-GFX8. */
334
radeon_set_privileged_config_reg(
335
cs, R_009100_SPI_CONFIG_CNTL,
336
S_009100_ENABLE_SQG_TOP_EVENTS(enable) | S_009100_ENABLE_SQG_BOP_EVENTS(enable));
337
}
338
}
339
340
static void
341
radv_emit_inhibit_clockgating(struct radv_device *device, struct radeon_cmdbuf *cs, bool inhibit)
342
{
343
if (device->physical_device->rad_info.chip_class >= GFX10) {
344
radeon_set_uconfig_reg(cs, R_037390_RLC_PERFMON_CLK_CNTL,
345
S_037390_PERFMON_CLOCK_STATE(inhibit));
346
} else if (device->physical_device->rad_info.chip_class >= GFX8) {
347
radeon_set_uconfig_reg(cs, R_0372FC_RLC_PERFMON_CLK_CNTL,
348
S_0372FC_PERFMON_CLOCK_STATE(inhibit));
349
}
350
}
351
352
static void
353
radv_emit_wait_for_idle(struct radv_device *device, struct radeon_cmdbuf *cs, int family)
354
{
355
enum rgp_flush_bits sqtt_flush_bits = 0;
356
si_cs_emit_cache_flush(
357
cs, device->physical_device->rad_info.chip_class, NULL, 0,
358
family == RING_COMPUTE && device->physical_device->rad_info.chip_class >= GFX7,
359
(family == RADV_QUEUE_COMPUTE
360
? RADV_CMD_FLAG_CS_PARTIAL_FLUSH
361
: (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |
362
RADV_CMD_FLAG_INV_ICACHE | RADV_CMD_FLAG_INV_SCACHE | RADV_CMD_FLAG_INV_VCACHE |
363
RADV_CMD_FLAG_INV_L2,
364
&sqtt_flush_bits, 0);
365
}
366
367
static bool
368
radv_thread_trace_init_bo(struct radv_device *device)
369
{
370
unsigned max_se = device->physical_device->rad_info.max_se;
371
struct radeon_winsys *ws = device->ws;
372
uint64_t size;
373
374
/* The buffer size and address need to be aligned in HW regs. Align the
375
* size as early as possible so that we do all the allocation & addressing
376
* correctly. */
377
device->thread_trace.buffer_size =
378
align64(device->thread_trace.buffer_size, 1u << SQTT_BUFFER_ALIGN_SHIFT);
379
380
/* Compute total size of the thread trace BO for all SEs. */
381
size = align64(sizeof(struct ac_thread_trace_info) * max_se, 1 << SQTT_BUFFER_ALIGN_SHIFT);
382
size += device->thread_trace.buffer_size * (uint64_t)max_se;
383
384
struct radeon_winsys_bo *bo = NULL;
385
VkResult result = ws->buffer_create(
386
ws, size, 4096, RADEON_DOMAIN_VRAM,
387
RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_ZERO_VRAM,
388
RADV_BO_PRIORITY_SCRATCH, 0, &bo);
389
device->thread_trace.bo = bo;
390
if (result != VK_SUCCESS)
391
return false;
392
393
device->thread_trace.ptr = ws->buffer_map(device->thread_trace.bo);
394
if (!device->thread_trace.ptr)
395
return false;
396
397
return true;
398
}
399
400
bool
401
radv_thread_trace_init(struct radv_device *device)
402
{
403
struct ac_thread_trace_data *thread_trace_data = &device->thread_trace;
404
405
/* Default buffer size set to 32MB per SE. */
406
device->thread_trace.buffer_size =
407
radv_get_int_debug_option("RADV_THREAD_TRACE_BUFFER_SIZE", 32 * 1024 * 1024);
408
device->thread_trace.start_frame = radv_get_int_debug_option("RADV_THREAD_TRACE", -1);
409
410
const char *trigger_file = getenv("RADV_THREAD_TRACE_TRIGGER");
411
if (trigger_file)
412
device->thread_trace.trigger_file = strdup(trigger_file);
413
414
if (!radv_thread_trace_init_bo(device))
415
return false;
416
417
list_inithead(&thread_trace_data->rgp_pso_correlation.record);
418
simple_mtx_init(&thread_trace_data->rgp_pso_correlation.lock, mtx_plain);
419
420
list_inithead(&thread_trace_data->rgp_loader_events.record);
421
simple_mtx_init(&thread_trace_data->rgp_loader_events.lock, mtx_plain);
422
423
list_inithead(&thread_trace_data->rgp_code_object.record);
424
simple_mtx_init(&thread_trace_data->rgp_code_object.lock, mtx_plain);
425
426
return true;
427
}
428
429
void
430
radv_thread_trace_finish(struct radv_device *device)
431
{
432
struct ac_thread_trace_data *thread_trace_data = &device->thread_trace;
433
struct radeon_winsys *ws = device->ws;
434
435
if (unlikely(device->thread_trace.bo))
436
ws->buffer_destroy(ws, device->thread_trace.bo);
437
438
for (unsigned i = 0; i < 2; i++) {
439
if (device->thread_trace.start_cs[i])
440
ws->cs_destroy(device->thread_trace.start_cs[i]);
441
if (device->thread_trace.stop_cs[i])
442
ws->cs_destroy(device->thread_trace.stop_cs[i]);
443
}
444
445
assert(thread_trace_data->rgp_pso_correlation.record_count == 0);
446
simple_mtx_destroy(&thread_trace_data->rgp_pso_correlation.lock);
447
448
assert(thread_trace_data->rgp_loader_events.record_count == 0);
449
simple_mtx_destroy(&thread_trace_data->rgp_loader_events.lock);
450
451
assert(thread_trace_data->rgp_code_object.record_count == 0);
452
simple_mtx_destroy(&thread_trace_data->rgp_code_object.lock);
453
}
454
455
static bool
456
radv_thread_trace_resize_bo(struct radv_device *device)
457
{
458
struct radeon_winsys *ws = device->ws;
459
460
/* Destroy the previous thread trace BO. */
461
ws->buffer_destroy(ws, device->thread_trace.bo);
462
463
/* Double the size of the thread trace buffer per SE. */
464
device->thread_trace.buffer_size *= 2;
465
466
fprintf(stderr,
467
"Failed to get the thread trace because the buffer "
468
"was too small, resizing to %d KB\n",
469
device->thread_trace.buffer_size / 1024);
470
471
/* Re-create the thread trace BO. */
472
return radv_thread_trace_init_bo(device);
473
}
474
475
bool
476
radv_begin_thread_trace(struct radv_queue *queue)
477
{
478
struct radv_device *device = queue->device;
479
int family = queue->queue_family_index;
480
struct radeon_winsys *ws = device->ws;
481
struct radeon_cmdbuf *cs;
482
VkResult result;
483
484
/* Destroy the previous start CS and create a new one. */
485
if (device->thread_trace.start_cs[family]) {
486
ws->cs_destroy(device->thread_trace.start_cs[family]);
487
device->thread_trace.start_cs[family] = NULL;
488
}
489
490
cs = ws->cs_create(ws, family);
491
if (!cs)
492
return false;
493
494
switch (family) {
495
case RADV_QUEUE_GENERAL:
496
radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
497
radeon_emit(cs, CC0_UPDATE_LOAD_ENABLES(1));
498
radeon_emit(cs, CC1_UPDATE_SHADOW_ENABLES(1));
499
break;
500
case RADV_QUEUE_COMPUTE:
501
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
502
radeon_emit(cs, 0);
503
break;
504
}
505
506
radv_cs_add_buffer(ws, cs, device->thread_trace.bo);
507
508
/* Make sure to wait-for-idle before starting SQTT. */
509
radv_emit_wait_for_idle(device, cs, family);
510
511
/* Disable clock gating before starting SQTT. */
512
radv_emit_inhibit_clockgating(device, cs, true);
513
514
/* Enable SQG events that collects thread trace data. */
515
radv_emit_spi_config_cntl(device, cs, true);
516
517
/* Start SQTT. */
518
radv_emit_thread_trace_start(device, cs, family);
519
520
result = ws->cs_finalize(cs);
521
if (result != VK_SUCCESS) {
522
ws->cs_destroy(cs);
523
return false;
524
}
525
526
device->thread_trace.start_cs[family] = cs;
527
528
return radv_queue_internal_submit(queue, cs);
529
}
530
531
bool
532
radv_end_thread_trace(struct radv_queue *queue)
533
{
534
struct radv_device *device = queue->device;
535
int family = queue->queue_family_index;
536
struct radeon_winsys *ws = device->ws;
537
struct radeon_cmdbuf *cs;
538
VkResult result;
539
540
/* Destroy the previous stop CS and create a new one. */
541
if (queue->device->thread_trace.stop_cs[family]) {
542
ws->cs_destroy(device->thread_trace.stop_cs[family]);
543
device->thread_trace.stop_cs[family] = NULL;
544
}
545
546
cs = ws->cs_create(ws, family);
547
if (!cs)
548
return false;
549
550
switch (family) {
551
case RADV_QUEUE_GENERAL:
552
radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
553
radeon_emit(cs, CC0_UPDATE_LOAD_ENABLES(1));
554
radeon_emit(cs, CC1_UPDATE_SHADOW_ENABLES(1));
555
break;
556
case RADV_QUEUE_COMPUTE:
557
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
558
radeon_emit(cs, 0);
559
break;
560
}
561
562
radv_cs_add_buffer(ws, cs, device->thread_trace.bo);
563
564
/* Make sure to wait-for-idle before stopping SQTT. */
565
radv_emit_wait_for_idle(device, cs, family);
566
567
/* Stop SQTT. */
568
radv_emit_thread_trace_stop(device, cs, family);
569
570
/* Restore previous state by disabling SQG events. */
571
radv_emit_spi_config_cntl(device, cs, false);
572
573
/* Restore previous state by re-enabling clock gating. */
574
radv_emit_inhibit_clockgating(device, cs, false);
575
576
result = ws->cs_finalize(cs);
577
if (result != VK_SUCCESS) {
578
ws->cs_destroy(cs);
579
return false;
580
}
581
582
device->thread_trace.stop_cs[family] = cs;
583
584
return radv_queue_internal_submit(queue, cs);
585
}
586
587
bool
588
radv_get_thread_trace(struct radv_queue *queue, struct ac_thread_trace *thread_trace)
589
{
590
struct radv_device *device = queue->device;
591
struct radeon_info *rad_info = &device->physical_device->rad_info;
592
unsigned max_se = rad_info->max_se;
593
void *thread_trace_ptr = device->thread_trace.ptr;
594
595
memset(thread_trace, 0, sizeof(*thread_trace));
596
597
for (unsigned se = 0; se < max_se; se++) {
598
uint64_t info_offset = ac_thread_trace_get_info_offset(se);
599
uint64_t data_offset = ac_thread_trace_get_data_offset(rad_info, &device->thread_trace, se);
600
void *info_ptr = (uint8_t *)thread_trace_ptr + info_offset;
601
void *data_ptr = (uint8_t *)thread_trace_ptr + data_offset;
602
struct ac_thread_trace_info *info = (struct ac_thread_trace_info *)info_ptr;
603
struct ac_thread_trace_se thread_trace_se = {0};
604
int first_active_cu = ffs(device->physical_device->rad_info.cu_mask[se][0]);
605
606
if (radv_se_is_disabled(device, se))
607
continue;
608
609
if (!ac_is_thread_trace_complete(&device->physical_device->rad_info, &device->thread_trace,
610
info)) {
611
if (!radv_thread_trace_resize_bo(device)) {
612
fprintf(stderr, "Failed to resize the thread "
613
"trace buffer.\n");
614
abort();
615
}
616
return false;
617
}
618
619
thread_trace_se.data_ptr = data_ptr;
620
thread_trace_se.info = *info;
621
thread_trace_se.shader_engine = se;
622
623
/* RGP seems to expect units of WGP on GFX10+. */
624
thread_trace_se.compute_unit = device->physical_device->rad_info.chip_class >= GFX10
625
? (first_active_cu / 2)
626
: first_active_cu;
627
628
thread_trace->traces[thread_trace->num_traces] = thread_trace_se;
629
thread_trace->num_traces++;
630
}
631
632
thread_trace->data = &device->thread_trace;
633
return true;
634
}
635
636