GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/amd/vulkan/radv_query.c

/*
 * Copyright 2016 Red Hat Inc.
 * Based on anv:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>

#include "nir/nir_builder.h"
#include "util/u_atomic.h"
#include "radv_cs.h"
#include "radv_meta.h"
#include "radv_private.h"
#include "sid.h"
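
/* Sentinel written into timestamp query slots on reset; the GPU result-copy
 * shader and the CPU path in radv_GetQueryPoolResults both treat a slot that
 * still holds this value as "not yet written". */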
#define TIMESTAMP_NOT_READY UINT64_MAX

static const int pipelinestat_block_size = 11 * 8;
static const unsigned pipeline_statistics_indices[] = {7, 6, 3, 4, 5, 2, 1, 0, 8, 9, 10};
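
/* Maps a VkQueryPipelineStatisticFlagBits bit position to the 64-bit slot the
 * hardware writes for that counter in a SAMPLE_PIPELINESTAT block (11
 * counters of 8 bytes each, hence pipelinestat_block_size). E.g. flag bit 0,
 * input assembly vertices, is stored in slot 7 of the block. */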
static unsigned
radv_get_pipeline_statistics_index(const VkQueryPipelineStatisticFlagBits flag)
{
   int offset = ffs(flag) - 1;
   assert(offset < ARRAY_SIZE(pipeline_statistics_indices));
   return pipeline_statistics_indices[offset];
}

static nir_ssa_def *
nir_test_flag(nir_builder *b, nir_ssa_def *flags, uint32_t flag)
{
   return nir_i2b(b, nir_iand(b, flags, nir_imm_int(b, flag)));
}
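
/* Emits "if (counter >= count) break; counter++;" into the innermost NIR
 * loop; the result-copy shaders below use this to bound their loops. */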
static void
radv_break_on_count(nir_builder *b, nir_variable *var, nir_ssa_def *count)
{
   nir_ssa_def *counter = nir_load_var(b, var);

   nir_push_if(b, nir_uge(b, counter, count));
   nir_jump(b, nir_jump_break);
   nir_pop_if(b, NULL);

   counter = nir_iadd(b, counter, nir_imm_int(b, 1));
   nir_store_var(b, var, counter, 0x1);
}
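
/* If VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set, writes the 32-bit
 * availability value at the given offset, zero-extending it to 64 bits when
 * VK_QUERY_RESULT_64_BIT is requested. */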
static void
radv_store_availability(nir_builder *b, nir_ssa_def *flags, nir_ssa_def *dst_buf,
                        nir_ssa_def *offset, nir_ssa_def *value32)
{
   nir_push_if(b, nir_test_flag(b, flags, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));

   nir_push_if(b, nir_test_flag(b, flags, VK_QUERY_RESULT_64_BIT));

   nir_store_ssbo(b, nir_vec2(b, value32, nir_imm_int(b, 0)), dst_buf, offset, .write_mask = 0x3,
                  .align_mul = 8);

   nir_push_else(b, NULL);

   nir_store_ssbo(b, value32, dst_buf, offset, .write_mask = 0x1, .align_mul = 4);

   nir_pop_if(b, NULL);

   nir_pop_if(b, NULL);
}
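
/* Occlusion query memory layout: each query slot holds one {start, end} pair
 * of 64-bit ZPASS counters per render backend (16 bytes per RB); the hardware
 * sets bit 63 of each value once it has been written, which is what the
 * availability checks below test for. */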
static nir_shader *
build_occlusion_query_shader(struct radv_device *device)
{
   /* the shader this builds is roughly
    *
    * push constants {
    *    uint32_t flags;
    *    uint32_t dst_stride;
    * };
    *
    * uint32_t src_stride = 16 * db_count;
    *
    * location(binding = 0) buffer dst_buf;
    * location(binding = 1) buffer src_buf;
    *
    * void main() {
    *    uint64_t result = 0;
    *    uint64_t src_offset = src_stride * global_id.x;
    *    uint64_t dst_offset = dst_stride * global_id.x;
    *    bool available = true;
    *    for (int i = 0; i < db_count; ++i) {
    *       if (enabled_rb_mask & (1 << i)) {
    *          uint64_t start = src_buf[src_offset + 16 * i];
    *          uint64_t end = src_buf[src_offset + 16 * i + 8];
    *          if ((start & (1ull << 63)) && (end & (1ull << 63)))
    *             result += end - start;
    *          else
    *             available = false;
    *       }
    *    }
    *    uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
    *    if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
    *       if (flags & VK_QUERY_RESULT_64_BIT)
    *          dst_buf[dst_offset] = result;
    *       else
    *          dst_buf[dst_offset] = (uint32_t)result;
    *    }
    *    if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
    *       dst_buf[dst_offset + elem_size] = available;
    *    }
    * }
    */
   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "occlusion_query");
   b.shader->info.workgroup_size[0] = 64;
   b.shader->info.workgroup_size[1] = 1;
   b.shader->info.workgroup_size[2] = 1;

   nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
   nir_variable *outer_counter =
      nir_local_variable_create(b.impl, glsl_int_type(), "outer_counter");
   nir_variable *start = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "start");
   nir_variable *end = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "end");
   nir_variable *available = nir_local_variable_create(b.impl, glsl_bool_type(), "available");
   unsigned enabled_rb_mask = device->physical_device->rad_info.enabled_rb_mask;
   unsigned db_count = device->physical_device->rad_info.max_render_backends;

   nir_ssa_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 16);

   nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
   nir_ssa_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);

   nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
   nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
   nir_ssa_def *block_size =
      nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
                    b.shader->info.workgroup_size[2], 0);
   nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
   global_id = nir_channel(&b, global_id, 0); // We only care about x here.

   nir_ssa_def *input_stride = nir_imm_int(&b, db_count * 16);
   nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
   nir_ssa_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 16);
   nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

   nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
   nir_store_var(&b, outer_counter, nir_imm_int(&b, 0), 0x1);
   nir_store_var(&b, available, nir_imm_true(&b), 0x1);

   nir_push_loop(&b);

   nir_ssa_def *current_outer_count = nir_load_var(&b, outer_counter);
   radv_break_on_count(&b, outer_counter, nir_imm_int(&b, db_count));

   nir_ssa_def *enabled_cond = nir_iand(&b, nir_imm_int(&b, enabled_rb_mask),
                                        nir_ishl(&b, nir_imm_int(&b, 1), current_outer_count));

   nir_push_if(&b, nir_i2b(&b, enabled_cond));

   nir_ssa_def *load_offset = nir_imul(&b, current_outer_count, nir_imm_int(&b, 16));
   load_offset = nir_iadd(&b, input_base, load_offset);

   nir_ssa_def *load = nir_load_ssbo(&b, 2, 64, src_buf, load_offset, .align_mul = 16);

   nir_store_var(&b, start, nir_channel(&b, load, 0), 0x1);
   nir_store_var(&b, end, nir_channel(&b, load, 1), 0x1);

   nir_ssa_def *start_done = nir_ilt(&b, nir_load_var(&b, start), nir_imm_int64(&b, 0));
   nir_ssa_def *end_done = nir_ilt(&b, nir_load_var(&b, end), nir_imm_int64(&b, 0));
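
   /* nir_ilt against zero is a signed compare, so these are true exactly when
    * bit 63 (the hardware's "value written" flag) is set in start/end. */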
   nir_push_if(&b, nir_iand(&b, start_done, end_done));

   nir_store_var(&b, result,
                 nir_iadd(&b, nir_load_var(&b, result),
                          nir_isub(&b, nir_load_var(&b, end), nir_load_var(&b, start))),
                 0x1);

   nir_push_else(&b, NULL);

   nir_store_var(&b, available, nir_imm_false(&b), 0x1);

   nir_pop_if(&b, NULL);
   nir_pop_if(&b, NULL);
   nir_pop_loop(&b, NULL);

   /* Store the result if complete or if partial results have been requested. */

   nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
   nir_ssa_def *result_size =
      nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
   nir_push_if(&b, nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT),
                           nir_load_var(&b, available)));

   nir_push_if(&b, result_is_64bit);

   nir_store_ssbo(&b, nir_load_var(&b, result), dst_buf, output_base, .write_mask = 0x1,
                  .align_mul = 8);

   nir_push_else(&b, NULL);

   nir_store_ssbo(&b, nir_u2u32(&b, nir_load_var(&b, result)), dst_buf, output_base,
                  .write_mask = 0x1, .align_mul = 8);

   nir_pop_if(&b, NULL);
   nir_pop_if(&b, NULL);

   radv_store_availability(&b, flags, dst_buf, nir_iadd(&b, result_size, output_base),
                           nir_b2i32(&b, nir_load_var(&b, available)));

   return b.shader;
}

static nir_shader *
build_pipeline_statistics_query_shader(struct radv_device *device)
{
   /* the shader this builds is roughly
    *
    * push constants {
    *    uint32_t flags;
    *    uint32_t dst_stride;
    *    uint32_t stats_mask;
    *    uint32_t avail_offset;
    * };
    *
    * uint32_t src_stride = pipelinestat_block_size * 2;
    *
    * location(binding = 0) buffer dst_buf;
    * location(binding = 1) buffer src_buf;
    *
    * void main() {
    *    uint64_t src_offset = src_stride * global_id.x;
    *    uint64_t dst_base = dst_stride * global_id.x;
    *    uint64_t dst_offset = dst_base;
    *    uint32_t elem_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
    *    uint32_t elem_count = stats_mask >> 16;
    *    uint32_t available32 = src_buf[avail_offset + 4 * global_id.x];
    *    if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
    *       dst_buf[dst_offset + elem_count * elem_size] = available32;
    *    }
    *    if ((bool)available32) {
    *       // repeat 11 times:
    *       if (stats_mask & (1 << 0)) {
    *          uint64_t start = src_buf[src_offset + 8 * indices[0]];
    *          uint64_t end = src_buf[src_offset + 8 * indices[0] + pipelinestat_block_size];
    *          uint64_t result = end - start;
    *          if (flags & VK_QUERY_RESULT_64_BIT)
    *             dst_buf[dst_offset] = result;
    *          else
    *             dst_buf[dst_offset] = (uint32_t)result;
    *          dst_offset += elem_size;
    *       }
    *    } else if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
    *       // Set everything to 0 as we don't know what is valid.
    *       for (int i = 0; i < elem_count; ++i)
    *          dst_buf[dst_base + elem_size * i] = 0;
    *    }
    * }
    */
   nir_builder b =
      nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "pipeline_statistics_query");
   b.shader->info.workgroup_size[0] = 64;
   b.shader->info.workgroup_size[1] = 1;
   b.shader->info.workgroup_size[2] = 1;

   nir_variable *output_offset =
      nir_local_variable_create(b.impl, glsl_int_type(), "output_offset");

   nir_ssa_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 16);
   nir_ssa_def *stats_mask = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 8), .range = 16);
   nir_ssa_def *avail_offset = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 12), .range = 16);

   nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
   nir_ssa_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);

   nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
   nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
   nir_ssa_def *block_size =
      nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
                    b.shader->info.workgroup_size[2], 0);
   nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
   global_id = nir_channel(&b, global_id, 0); // We only care about x here.

   nir_ssa_def *input_stride = nir_imm_int(&b, pipelinestat_block_size * 2);
   nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
   nir_ssa_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 16);
   nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

   avail_offset = nir_iadd(&b, avail_offset, nir_imul(&b, global_id, nir_imm_int(&b, 4)));

   nir_ssa_def *available32 = nir_load_ssbo(&b, 1, 32, src_buf, avail_offset, .align_mul = 4);

   nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
   nir_ssa_def *elem_size = nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));
   nir_ssa_def *elem_count = nir_ushr(&b, stats_mask, nir_imm_int(&b, 16));

   radv_store_availability(&b, flags, dst_buf,
                           nir_iadd(&b, output_base, nir_imul(&b, elem_count, elem_size)),
                           available32);

   nir_push_if(&b, nir_i2b(&b, available32));

   nir_store_var(&b, output_offset, output_base, 0x1);
   for (int i = 0; i < ARRAY_SIZE(pipeline_statistics_indices); ++i) {
      nir_push_if(&b, nir_test_flag(&b, stats_mask, 1u << i));

      nir_ssa_def *start_offset =
         nir_iadd(&b, input_base, nir_imm_int(&b, pipeline_statistics_indices[i] * 8));
      nir_ssa_def *start = nir_load_ssbo(&b, 1, 64, src_buf, start_offset, .align_mul = 8);

      nir_ssa_def *end_offset =
         nir_iadd(&b, input_base,
                  nir_imm_int(&b, pipeline_statistics_indices[i] * 8 + pipelinestat_block_size));
      nir_ssa_def *end = nir_load_ssbo(&b, 1, 64, src_buf, end_offset, .align_mul = 8);

      nir_ssa_def *result = nir_isub(&b, end, start);

      /* Store result */
      nir_push_if(&b, result_is_64bit);

      nir_store_ssbo(&b, result, dst_buf, nir_load_var(&b, output_offset), .write_mask = 0x1,
                     .align_mul = 8);

      nir_push_else(&b, NULL);

      nir_store_ssbo(&b, nir_u2u32(&b, result), dst_buf, nir_load_var(&b, output_offset),
                     .write_mask = 0x1, .align_mul = 4);

      nir_pop_if(&b, NULL);

      nir_store_var(&b, output_offset, nir_iadd(&b, nir_load_var(&b, output_offset), elem_size),
                    0x1);

      nir_pop_if(&b, NULL);
   }

   nir_push_else(&b, NULL); /* nir_i2b(&b, available32) */

   nir_push_if(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT));

   /* Stores zeros in all outputs. */

   nir_variable *counter = nir_local_variable_create(b.impl, glsl_int_type(), "counter");
   nir_store_var(&b, counter, nir_imm_int(&b, 0), 0x1);

   nir_loop *loop = nir_push_loop(&b);

   nir_ssa_def *current_counter = nir_load_var(&b, counter);
   radv_break_on_count(&b, counter, elem_count);

   nir_ssa_def *output_elem = nir_iadd(&b, output_base, nir_imul(&b, elem_size, current_counter));
   nir_push_if(&b, result_is_64bit);

   nir_store_ssbo(&b, nir_imm_int64(&b, 0), dst_buf, output_elem, .write_mask = 0x1,
                  .align_mul = 8);

   nir_push_else(&b, NULL);

   nir_store_ssbo(&b, nir_imm_int(&b, 0), dst_buf, output_elem, .write_mask = 0x1, .align_mul = 4);

   nir_pop_if(&b, NULL);

   nir_pop_loop(&b, loop);
   nir_pop_if(&b, NULL); /* VK_QUERY_RESULT_PARTIAL_BIT */
   nir_pop_if(&b, NULL); /* nir_i2b(&b, available32) */
   return b.shader;
}

static nir_shader *
build_tfb_query_shader(struct radv_device *device)
{
   /* the shader this builds is roughly
    *
    * uint32_t src_stride = 32;
    *
    * location(binding = 0) buffer dst_buf;
    * location(binding = 1) buffer src_buf;
    *
    * void main() {
    *    uint64_t result[2] = {};
    *    bool available = false;
    *    uint64_t src_offset = src_stride * global_id.x;
    *    uint64_t dst_offset = dst_stride * global_id.x;
    *    uint64_t *src_data = src_buf[src_offset];
    *    uint32_t avail = (src_data[0] >> 32) &
    *                     (src_data[1] >> 32) &
    *                     (src_data[2] >> 32) &
    *                     (src_data[3] >> 32);
    *    if (avail & 0x80000000) {
    *       result[0] = src_data[3] - src_data[1];
    *       result[1] = src_data[2] - src_data[0];
    *       available = true;
    *    }
    *    uint32_t result_size = flags & VK_QUERY_RESULT_64_BIT ? 16 : 8;
    *    if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
    *       if (flags & VK_QUERY_RESULT_64_BIT) {
    *          dst_buf[dst_offset] = result;
    *       } else {
    *          dst_buf[dst_offset] = (uint32_t)result;
    *       }
    *    }
    *    if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
    *       dst_buf[dst_offset + result_size] = available;
    *    }
    * }
    */
   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "tfb_query");
   b.shader->info.workgroup_size[0] = 64;
   b.shader->info.workgroup_size[1] = 1;
   b.shader->info.workgroup_size[2] = 1;

   /* Create and initialize local variables. */
   nir_variable *result =
      nir_local_variable_create(b.impl, glsl_vector_type(GLSL_TYPE_UINT64, 2), "result");
   nir_variable *available = nir_local_variable_create(b.impl, glsl_bool_type(), "available");

   nir_store_var(&b, result, nir_vec2(&b, nir_imm_int64(&b, 0), nir_imm_int64(&b, 0)), 0x3);
   nir_store_var(&b, available, nir_imm_false(&b), 0x1);

   nir_ssa_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 16);

   /* Load resources. */
   nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
   nir_ssa_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);

   /* Compute global ID. */
   nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
   nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
   nir_ssa_def *block_size =
      nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
                    b.shader->info.workgroup_size[2], 0);
   nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
   global_id = nir_channel(&b, global_id, 0); // We only care about x here.

   /* Compute src/dst strides. */
   nir_ssa_def *input_stride = nir_imm_int(&b, 32);
   nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
   nir_ssa_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 16);
   nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

   /* Load data from the query pool. */
   nir_ssa_def *load1 = nir_load_ssbo(&b, 4, 32, src_buf, input_base, .align_mul = 32);
   nir_ssa_def *load2 = nir_load_ssbo(
      &b, 4, 32, src_buf, nir_iadd(&b, input_base, nir_imm_int(&b, 16)), .align_mul = 16);

   /* Check if result is available. */
   nir_ssa_def *avails[2];
   avails[0] = nir_iand(&b, nir_channel(&b, load1, 1), nir_channel(&b, load1, 3));
   avails[1] = nir_iand(&b, nir_channel(&b, load2, 1), nir_channel(&b, load2, 3));
   nir_ssa_def *result_is_available =
      nir_i2b(&b, nir_iand(&b, nir_iand(&b, avails[0], avails[1]), nir_imm_int(&b, 0x80000000)));

   /* Only compute result if available. */
   nir_push_if(&b, result_is_available);

   /* Pack values. */
   nir_ssa_def *packed64[4];
   packed64[0] =
      nir_pack_64_2x32(&b, nir_vec2(&b, nir_channel(&b, load1, 0), nir_channel(&b, load1, 1)));
   packed64[1] =
      nir_pack_64_2x32(&b, nir_vec2(&b, nir_channel(&b, load1, 2), nir_channel(&b, load1, 3)));
   packed64[2] =
      nir_pack_64_2x32(&b, nir_vec2(&b, nir_channel(&b, load2, 0), nir_channel(&b, load2, 1)));
   packed64[3] =
      nir_pack_64_2x32(&b, nir_vec2(&b, nir_channel(&b, load2, 2), nir_channel(&b, load2, 3)));

   /* Compute result. */
   nir_ssa_def *num_primitive_written = nir_isub(&b, packed64[3], packed64[1]);
   nir_ssa_def *primitive_storage_needed = nir_isub(&b, packed64[2], packed64[0]);

   nir_store_var(&b, result, nir_vec2(&b, num_primitive_written, primitive_storage_needed), 0x3);
   nir_store_var(&b, available, nir_imm_true(&b), 0x1);

   nir_pop_if(&b, NULL);

   /* Determine if result is 64 or 32 bit. */
   nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
   nir_ssa_def *result_size =
      nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 16), nir_imm_int(&b, 8));

   /* Store the result if complete or partial results have been requested. */
   nir_push_if(&b, nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT),
                           nir_load_var(&b, available)));

   /* Store result. */
   nir_push_if(&b, result_is_64bit);

   nir_store_ssbo(&b, nir_load_var(&b, result), dst_buf, output_base, .write_mask = 0x3,
                  .align_mul = 8);

   nir_push_else(&b, NULL);

   nir_store_ssbo(&b, nir_u2u32(&b, nir_load_var(&b, result)), dst_buf, output_base,
                  .write_mask = 0x3, .align_mul = 4);

   nir_pop_if(&b, NULL);
   nir_pop_if(&b, NULL);

   radv_store_availability(&b, flags, dst_buf, nir_iadd(&b, result_size, output_base),
                           nir_b2i32(&b, nir_load_var(&b, available)));

   return b.shader;
}

static nir_shader *
build_timestamp_query_shader(struct radv_device *device)
{
   /* the shader this builds is roughly
    *
    * uint32_t src_stride = 8;
    *
    * location(binding = 0) buffer dst_buf;
    * location(binding = 1) buffer src_buf;
    *
    * void main() {
    *    uint64_t result = 0;
    *    bool available = false;
    *    uint64_t src_offset = src_stride * global_id.x;
    *    uint64_t dst_offset = dst_stride * global_id.x;
    *    uint64_t timestamp = src_buf[src_offset];
    *    if (timestamp != TIMESTAMP_NOT_READY) {
    *       result = timestamp;
    *       available = true;
    *    }
    *    uint32_t result_size = flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
    *    if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || available) {
    *       if (flags & VK_QUERY_RESULT_64_BIT) {
    *          dst_buf[dst_offset] = result;
    *       } else {
    *          dst_buf[dst_offset] = (uint32_t)result;
    *       }
    *    }
    *    if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
    *       dst_buf[dst_offset + result_size] = available;
    *    }
    * }
    */
   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "timestamp_query");
   b.shader->info.workgroup_size[0] = 64;
   b.shader->info.workgroup_size[1] = 1;
   b.shader->info.workgroup_size[2] = 1;

   /* Create and initialize local variables. */
   nir_variable *result = nir_local_variable_create(b.impl, glsl_uint64_t_type(), "result");
   nir_variable *available = nir_local_variable_create(b.impl, glsl_bool_type(), "available");

   nir_store_var(&b, result, nir_imm_int64(&b, 0), 0x1);
   nir_store_var(&b, available, nir_imm_false(&b), 0x1);

   nir_ssa_def *flags = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 16);

   /* Load resources. */
   nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
   nir_ssa_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);

   /* Compute global ID. */
   nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
   nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
   nir_ssa_def *block_size =
      nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
                    b.shader->info.workgroup_size[2], 0);
   nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
   global_id = nir_channel(&b, global_id, 0); // We only care about x here.

   /* Compute src/dst strides. */
   nir_ssa_def *input_stride = nir_imm_int(&b, 8);
   nir_ssa_def *input_base = nir_imul(&b, input_stride, global_id);
   nir_ssa_def *output_stride = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 4), .range = 16);
   nir_ssa_def *output_base = nir_imul(&b, output_stride, global_id);

   /* Load data from the query pool. */
   nir_ssa_def *load = nir_load_ssbo(&b, 2, 32, src_buf, input_base, .align_mul = 8);

   /* Pack the timestamp. */
   nir_ssa_def *timestamp;
   timestamp =
      nir_pack_64_2x32(&b, nir_vec2(&b, nir_channel(&b, load, 0), nir_channel(&b, load, 1)));

   /* Check if result is available. */
   nir_ssa_def *result_is_available =
      nir_i2b(&b, nir_ine(&b, timestamp, nir_imm_int64(&b, TIMESTAMP_NOT_READY)));

   /* Only store result if available. */
   nir_push_if(&b, result_is_available);

   nir_store_var(&b, result, timestamp, 0x1);
   nir_store_var(&b, available, nir_imm_true(&b), 0x1);

   nir_pop_if(&b, NULL);

   /* Determine if result is 64 or 32 bit. */
   nir_ssa_def *result_is_64bit = nir_test_flag(&b, flags, VK_QUERY_RESULT_64_BIT);
   nir_ssa_def *result_size =
      nir_bcsel(&b, result_is_64bit, nir_imm_int(&b, 8), nir_imm_int(&b, 4));

   /* Store the result if complete or partial results have been requested. */
   nir_push_if(&b, nir_ior(&b, nir_test_flag(&b, flags, VK_QUERY_RESULT_PARTIAL_BIT),
                           nir_load_var(&b, available)));

   /* Store result. */
   nir_push_if(&b, result_is_64bit);

   nir_store_ssbo(&b, nir_load_var(&b, result), dst_buf, output_base, .write_mask = 0x1,
                  .align_mul = 8);

   nir_push_else(&b, NULL);

   nir_store_ssbo(&b, nir_u2u32(&b, nir_load_var(&b, result)), dst_buf, output_base,
                  .write_mask = 0x1, .align_mul = 4);

   nir_pop_if(&b, NULL);

   nir_pop_if(&b, NULL);

   radv_store_availability(&b, flags, dst_buf, nir_iadd(&b, result_size, output_base),
                           nir_b2i32(&b, nir_load_var(&b, available)));

   return b.shader;
}

static VkResult
radv_device_init_meta_query_state_internal(struct radv_device *device)
{
   VkResult result;
   nir_shader *occlusion_cs = NULL;
   nir_shader *pipeline_statistics_cs = NULL;
   nir_shader *tfb_cs = NULL;
   nir_shader *timestamp_cs = NULL;

   mtx_lock(&device->meta_state.mtx);
   if (device->meta_state.query.pipeline_statistics_query_pipeline) {
      mtx_unlock(&device->meta_state.mtx);
      return VK_SUCCESS;
   }
   occlusion_cs = build_occlusion_query_shader(device);
   pipeline_statistics_cs = build_pipeline_statistics_query_shader(device);
   tfb_cs = build_tfb_query_shader(device);
   timestamp_cs = build_timestamp_query_shader(device);

   VkDescriptorSetLayoutCreateInfo occlusion_ds_create_info = {
      .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
      .flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
      .bindingCount = 2,
      .pBindings = (VkDescriptorSetLayoutBinding[]){
         {.binding = 0,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
          .descriptorCount = 1,
          .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
          .pImmutableSamplers = NULL},
         {.binding = 1,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
          .descriptorCount = 1,
          .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
          .pImmutableSamplers = NULL},
      }};

   result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device), &occlusion_ds_create_info,
                                           &device->meta_state.alloc,
                                           &device->meta_state.query.ds_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineLayoutCreateInfo occlusion_pl_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
      .setLayoutCount = 1,
      .pSetLayouts = &device->meta_state.query.ds_layout,
      .pushConstantRangeCount = 1,
      .pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, 16},
   };

   result =
      radv_CreatePipelineLayout(radv_device_to_handle(device), &occlusion_pl_create_info,
                                &device->meta_state.alloc, &device->meta_state.query.p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineShaderStageCreateInfo occlusion_pipeline_shader_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(occlusion_cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   VkComputePipelineCreateInfo occlusion_vk_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = occlusion_pipeline_shader_stage,
      .flags = 0,
      .layout = device->meta_state.query.p_layout,
   };

   result = radv_CreateComputePipelines(
      radv_device_to_handle(device), radv_pipeline_cache_to_handle(&device->meta_state.cache), 1,
      &occlusion_vk_pipeline_info, NULL, &device->meta_state.query.occlusion_query_pipeline);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineShaderStageCreateInfo pipeline_statistics_pipeline_shader_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(pipeline_statistics_cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   VkComputePipelineCreateInfo pipeline_statistics_vk_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = pipeline_statistics_pipeline_shader_stage,
      .flags = 0,
      .layout = device->meta_state.query.p_layout,
   };

   result = radv_CreateComputePipelines(
      radv_device_to_handle(device), radv_pipeline_cache_to_handle(&device->meta_state.cache), 1,
      &pipeline_statistics_vk_pipeline_info, NULL,
      &device->meta_state.query.pipeline_statistics_query_pipeline);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineShaderStageCreateInfo tfb_pipeline_shader_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(tfb_cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   VkComputePipelineCreateInfo tfb_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = tfb_pipeline_shader_stage,
      .flags = 0,
      .layout = device->meta_state.query.p_layout,
   };

   result = radv_CreateComputePipelines(
      radv_device_to_handle(device), radv_pipeline_cache_to_handle(&device->meta_state.cache), 1,
      &tfb_pipeline_info, NULL, &device->meta_state.query.tfb_query_pipeline);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineShaderStageCreateInfo timestamp_pipeline_shader_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(timestamp_cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   VkComputePipelineCreateInfo timestamp_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = timestamp_pipeline_shader_stage,
      .flags = 0,
      .layout = device->meta_state.query.p_layout,
   };

   result = radv_CreateComputePipelines(
      radv_device_to_handle(device), radv_pipeline_cache_to_handle(&device->meta_state.cache), 1,
      &timestamp_pipeline_info, NULL, &device->meta_state.query.timestamp_query_pipeline);

fail:
   if (result != VK_SUCCESS)
      radv_device_finish_meta_query_state(device);
   ralloc_free(occlusion_cs);
   ralloc_free(pipeline_statistics_cs);
   ralloc_free(tfb_cs);
   ralloc_free(timestamp_cs);
   mtx_unlock(&device->meta_state.mtx);
   return result;
}

VkResult
radv_device_init_meta_query_state(struct radv_device *device, bool on_demand)
{
   if (on_demand)
      return VK_SUCCESS;

   return radv_device_init_meta_query_state_internal(device);
}

void
radv_device_finish_meta_query_state(struct radv_device *device)
{
   if (device->meta_state.query.tfb_query_pipeline)
      radv_DestroyPipeline(radv_device_to_handle(device),
                           device->meta_state.query.tfb_query_pipeline, &device->meta_state.alloc);

   if (device->meta_state.query.pipeline_statistics_query_pipeline)
      radv_DestroyPipeline(radv_device_to_handle(device),
                           device->meta_state.query.pipeline_statistics_query_pipeline,
                           &device->meta_state.alloc);

   if (device->meta_state.query.occlusion_query_pipeline)
      radv_DestroyPipeline(radv_device_to_handle(device),
                           device->meta_state.query.occlusion_query_pipeline,
                           &device->meta_state.alloc);

   if (device->meta_state.query.timestamp_query_pipeline)
      radv_DestroyPipeline(radv_device_to_handle(device),
                           device->meta_state.query.timestamp_query_pipeline,
                           &device->meta_state.alloc);

   if (device->meta_state.query.p_layout)
      radv_DestroyPipelineLayout(radv_device_to_handle(device), device->meta_state.query.p_layout,
                                 &device->meta_state.alloc);

   if (device->meta_state.query.ds_layout)
      radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
                                      device->meta_state.query.ds_layout,
                                      &device->meta_state.alloc);
}
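
/* Common GPU path for copying query results to a buffer: binds the result
 * buffer and the query pool BO as storage buffers (set 0, bindings 0 and 1),
 * pushes {flags, dst_stride, stats_mask, avail_offset} as constants, then
 * dispatches one invocation per query with the requested pipeline. */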
static void
radv_query_shader(struct radv_cmd_buffer *cmd_buffer, VkPipeline *pipeline,
                  struct radeon_winsys_bo *src_bo, struct radeon_winsys_bo *dst_bo,
                  uint64_t src_offset, uint64_t dst_offset, uint32_t src_stride,
                  uint32_t dst_stride, uint32_t count, uint32_t flags, uint32_t pipeline_stats_mask,
                  uint32_t avail_offset)
{
   struct radv_device *device = cmd_buffer->device;
   struct radv_meta_saved_state saved_state;
   bool old_predicating;

   if (!*pipeline) {
      VkResult ret = radv_device_init_meta_query_state_internal(device);
      if (ret != VK_SUCCESS) {
         cmd_buffer->record_result = ret;
         return;
      }
   }

   radv_meta_save(
      &saved_state, cmd_buffer,
      RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_CONSTANTS | RADV_META_SAVE_DESCRIPTORS);

   /* VK_EXT_conditional_rendering says that copy commands should not be
    * affected by conditional rendering.
    */
   old_predicating = cmd_buffer->state.predicating;
   cmd_buffer->state.predicating = false;

   struct radv_buffer dst_buffer = {.bo = dst_bo, .offset = dst_offset, .size = dst_stride * count};

   struct radv_buffer src_buffer = {
      .bo = src_bo,
      .offset = src_offset,
      .size = MAX2(src_stride * count, avail_offset + 4 * count - src_offset)};

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
                        *pipeline);

   radv_meta_push_descriptor_set(
      cmd_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, device->meta_state.query.p_layout, 0, /* set */
      2, /* descriptorWriteCount */
      (VkWriteDescriptorSet[]){
         {.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
          .dstBinding = 0,
          .dstArrayElement = 0,
          .descriptorCount = 1,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
          .pBufferInfo = &(VkDescriptorBufferInfo){.buffer = radv_buffer_to_handle(&dst_buffer),
                                                   .offset = 0,
                                                   .range = VK_WHOLE_SIZE}},
         {.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
          .dstBinding = 1,
          .dstArrayElement = 0,
          .descriptorCount = 1,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
          .pBufferInfo = &(VkDescriptorBufferInfo){.buffer = radv_buffer_to_handle(&src_buffer),
                                                   .offset = 0,
                                                   .range = VK_WHOLE_SIZE}}});

   /* Encode the number of elements for easy access by the shader. */
   pipeline_stats_mask &= 0x7ff;
   pipeline_stats_mask |= util_bitcount(pipeline_stats_mask) << 16;
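   /* E.g. a mask of IA vertices | IA primitives (0x3) becomes 0x20003: the
    * low 11 bits still select the counters, while bits 16+ carry
    * elem_count = 2, which the shader recovers as stats_mask >> 16. */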

   avail_offset -= src_offset;

   struct {
      uint32_t flags;
      uint32_t dst_stride;
      uint32_t pipeline_stats_mask;
      uint32_t avail_offset;
   } push_constants = {flags, dst_stride, pipeline_stats_mask, avail_offset};

   radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer), device->meta_state.query.p_layout,
                         VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants), &push_constants);

   cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_L2 | RADV_CMD_FLAG_INV_VCACHE;

   if (flags & VK_QUERY_RESULT_WAIT_BIT)
      cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER;

   radv_unaligned_dispatch(cmd_buffer, count, 1, 1);

   /* Restore conditional rendering. */
   cmd_buffer->state.predicating = old_predicating;

   radv_meta_restore(&saved_state, cmd_buffer);
}

static bool
radv_query_pool_needs_gds(struct radv_device *device, struct radv_query_pool *pool)
{
   /* The number of primitives generated by geometry shader invocations is
    * only counted by the hardware if GS uses the legacy path. When NGG GS
    * is used, the hardware can't know the number of generated primitives
    * and we have to count it manually inside the shader. To achieve that,
    * the driver does a plain GDS atomic to accumulate that value.
    * TODO: fix use of NGG GS and non-NGG GS inside the same begin/end
    * query.
    */
   return device->physical_device->use_ngg &&
          (pool->pipeline_stats_mask & VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT);
}

static void
radv_destroy_query_pool(struct radv_device *device, const VkAllocationCallbacks *pAllocator,
                        struct radv_query_pool *pool)
{
   if (pool->bo)
      device->ws->buffer_destroy(device->ws, pool->bo);
   vk_object_base_finish(&pool->base);
   vk_free2(&device->vk.alloc, pAllocator, pool);
}
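
/* Pool BO layout: queryCount result slots of pool->stride bytes each,
 * followed, for pipeline-statistics pools, by one 4-byte availability word
 * per query starting at pool->availability_offset. */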
VkResult
radv_CreateQueryPool(VkDevice _device, const VkQueryPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   struct radv_query_pool *pool =
      vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!pool)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_QUERY_POOL);

   switch (pCreateInfo->queryType) {
   case VK_QUERY_TYPE_OCCLUSION:
      pool->stride = 16 * device->physical_device->rad_info.max_render_backends;
      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      pool->stride = pipelinestat_block_size * 2;
      break;
   case VK_QUERY_TYPE_TIMESTAMP:
      pool->stride = 8;
      break;
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
      pool->stride = 32;
      break;
   default:
      unreachable("creating unhandled query type");
   }

   pool->type = pCreateInfo->queryType;
   pool->pipeline_stats_mask = pCreateInfo->pipelineStatistics;
   pool->availability_offset = pool->stride * pCreateInfo->queryCount;
   pool->size = pool->availability_offset;
   if (pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
      pool->size += 4 * pCreateInfo->queryCount;

   VkResult result = device->ws->buffer_create(device->ws, pool->size, 64, RADEON_DOMAIN_GTT,
                                               RADEON_FLAG_NO_INTERPROCESS_SHARING,
                                               RADV_BO_PRIORITY_QUERY_POOL, 0, &pool->bo);
   if (result != VK_SUCCESS) {
      radv_destroy_query_pool(device, pAllocator, pool);
      return vk_error(device->instance, result);
   }

   pool->ptr = device->ws->buffer_map(pool->bo);
   if (!pool->ptr) {
      radv_destroy_query_pool(device, pAllocator, pool);
      return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
   }

   *pQueryPool = radv_query_pool_to_handle(pool);
   return VK_SUCCESS;
}

void
radv_DestroyQueryPool(VkDevice _device, VkQueryPool _pool, const VkAllocationCallbacks *pAllocator)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_query_pool, pool, _pool);

   if (!pool)
      return;

   radv_destroy_query_pool(device, pAllocator, pool);
}
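
/* CPU result path: reads the persistently mapped pool memory directly,
 * mirroring the compute shaders above, and implements the WAIT (spin until
 * the value is written), PARTIAL and WITH_AVAILABILITY flag semantics for
 * each query type. */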
VkResult
radv_GetQueryPoolResults(VkDevice _device, VkQueryPool queryPool, uint32_t firstQuery,
                         uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
                         VkQueryResultFlags flags)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
   char *data = pData;
   VkResult result = VK_SUCCESS;

   if (radv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   for (unsigned query_idx = 0; query_idx < queryCount; ++query_idx, data += stride) {
      char *dest = data;
      unsigned query = firstQuery + query_idx;
      char *src = pool->ptr + query * pool->stride;
      uint32_t available;

      switch (pool->type) {
      case VK_QUERY_TYPE_TIMESTAMP: {
         uint64_t const *src64 = (uint64_t const *)src;
         uint64_t value;

         do {
            value = p_atomic_read(src64);
         } while (value == TIMESTAMP_NOT_READY && (flags & VK_QUERY_RESULT_WAIT_BIT));

         available = value != TIMESTAMP_NOT_READY;

         if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
            result = VK_NOT_READY;

         if (flags & VK_QUERY_RESULT_64_BIT) {
            if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
               *(uint64_t *)dest = value;
            dest += 8;
         } else {
            if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
               *(uint32_t *)dest = (uint32_t)value;
            dest += 4;
         }
         break;
      }
      case VK_QUERY_TYPE_OCCLUSION: {
         uint64_t const *src64 = (uint64_t const *)src;
         uint32_t db_count = device->physical_device->rad_info.max_render_backends;
         uint32_t enabled_rb_mask = device->physical_device->rad_info.enabled_rb_mask;
         uint64_t sample_count = 0;
         available = 1;

         for (int i = 0; i < db_count; ++i) {
            uint64_t start, end;

            if (!(enabled_rb_mask & (1 << i)))
               continue;

            do {
               start = p_atomic_read(src64 + 2 * i);
               end = p_atomic_read(src64 + 2 * i + 1);
            } while ((!(start & (1ull << 63)) || !(end & (1ull << 63))) &&
                     (flags & VK_QUERY_RESULT_WAIT_BIT));

            if (!(start & (1ull << 63)) || !(end & (1ull << 63)))
               available = 0;
            else {
               sample_count += end - start;
            }
         }

         if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
            result = VK_NOT_READY;

         if (flags & VK_QUERY_RESULT_64_BIT) {
            if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
               *(uint64_t *)dest = sample_count;
            dest += 8;
         } else {
            if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
               *(uint32_t *)dest = sample_count;
            dest += 4;
         }
         break;
      }
      case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
         const uint32_t *avail_ptr =
            (const uint32_t *)(pool->ptr + pool->availability_offset + 4 * query);

         do {
            available = p_atomic_read(avail_ptr);
         } while (!available && (flags & VK_QUERY_RESULT_WAIT_BIT));

         if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
            result = VK_NOT_READY;

         const uint64_t *start = (uint64_t *)src;
         const uint64_t *stop = (uint64_t *)(src + pipelinestat_block_size);
         if (flags & VK_QUERY_RESULT_64_BIT) {
            uint64_t *dst = (uint64_t *)dest;
            dest += util_bitcount(pool->pipeline_stats_mask) * 8;
            for (int i = 0; i < ARRAY_SIZE(pipeline_statistics_indices); ++i) {
               if (pool->pipeline_stats_mask & (1u << i)) {
                  if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
                     *dst = stop[pipeline_statistics_indices[i]] -
                            start[pipeline_statistics_indices[i]];
                  dst++;
               }
            }

         } else {
            uint32_t *dst = (uint32_t *)dest;
            dest += util_bitcount(pool->pipeline_stats_mask) * 4;
            for (int i = 0; i < ARRAY_SIZE(pipeline_statistics_indices); ++i) {
               if (pool->pipeline_stats_mask & (1u << i)) {
                  if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
                     *dst = stop[pipeline_statistics_indices[i]] -
                            start[pipeline_statistics_indices[i]];
                  dst++;
               }
            }
         }
         break;
      }
      case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: {
         uint64_t const *src64 = (uint64_t const *)src;
         uint64_t num_primitives_written;
         uint64_t primitive_storage_needed;

         /* SAMPLE_STREAMOUTSTATS stores this structure:
          * {
          *    u64 NumPrimitivesWritten;
          *    u64 PrimitiveStorageNeeded;
          * }
          */
         available = 1;
         for (int j = 0; j < 4; j++) {
            if (!(p_atomic_read(src64 + j) & 0x8000000000000000UL))
               available = 0;
         }

         if (!available && !(flags & VK_QUERY_RESULT_PARTIAL_BIT))
            result = VK_NOT_READY;

         num_primitives_written = src64[3] - src64[1];
         primitive_storage_needed = src64[2] - src64[0];

         if (flags & VK_QUERY_RESULT_64_BIT) {
            if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
               *(uint64_t *)dest = num_primitives_written;
            dest += 8;
            if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
               *(uint64_t *)dest = primitive_storage_needed;
            dest += 8;
         } else {
            if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
               *(uint32_t *)dest = num_primitives_written;
            dest += 4;
            if (available || (flags & VK_QUERY_RESULT_PARTIAL_BIT))
               *(uint32_t *)dest = primitive_storage_needed;
            dest += 4;
         }
         break;
      }
      default:
         unreachable("trying to get results of unhandled query type");
      }

      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
         if (flags & VK_QUERY_RESULT_64_BIT) {
            *(uint64_t *)dest = available;
         } else {
            *(uint32_t *)dest = available;
         }
      }
   }

   return result;
}

static void
emit_query_flush(struct radv_cmd_buffer *cmd_buffer, struct radv_query_pool *pool)
{
   if (cmd_buffer->pending_reset_query) {
      if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
         /* Only need to flush caches if the query pool size is
          * large enough to be reset using the compute shader
          * path. Small pools don't need any cache flushes
          * because we use a CP dma clear.
          */
         si_emit_cache_flush(cmd_buffer);
      }
   }
}

void
radv_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool,
                             uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer,
                             VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
   RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
   struct radeon_cmdbuf *cs = cmd_buffer->cs;
   uint64_t va = radv_buffer_get_va(pool->bo);
   uint64_t dest_va = radv_buffer_get_va(dst_buffer->bo);
   dest_va += dst_buffer->offset + dstOffset;

   radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pool->bo);
   radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);

   /* From the Vulkan spec 1.1.108:
    *
    * "vkCmdCopyQueryPoolResults is guaranteed to see the effect of
    *  previous uses of vkCmdResetQueryPool in the same queue, without any
    *  additional synchronization."
    *
    * So, we have to flush the caches if the compute shader path was used.
    */
   emit_query_flush(cmd_buffer, pool);

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      if (flags & VK_QUERY_RESULT_WAIT_BIT) {
         unsigned enabled_rb_mask = cmd_buffer->device->physical_device->rad_info.enabled_rb_mask;
         uint32_t rb_avail_offset = 16 * util_last_bit(enabled_rb_mask) - 4;
         for (unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
            unsigned query = firstQuery + i;
            uint64_t src_va = va + query * pool->stride + rb_avail_offset;

            radeon_check_space(cmd_buffer->device->ws, cs, 7);

            /* Waits on the upper word of the last DB entry */
            radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL, src_va, 0x80000000, 0xffffffff);
         }
      }
      radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.occlusion_query_pipeline,
                        pool->bo, dst_buffer->bo, firstQuery * pool->stride,
                        dst_buffer->offset + dstOffset, pool->stride, stride, queryCount, flags, 0,
                        0);
      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      if (flags & VK_QUERY_RESULT_WAIT_BIT) {
         for (unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
            unsigned query = firstQuery + i;

            radeon_check_space(cmd_buffer->device->ws, cs, 7);

            uint64_t avail_va = va + pool->availability_offset + 4 * query;

            /* This waits on the ME. All copies below are done on the ME */
            radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, avail_va, 1, 0xffffffff);
         }
      }
      radv_query_shader(cmd_buffer,
                        &cmd_buffer->device->meta_state.query.pipeline_statistics_query_pipeline,
                        pool->bo, dst_buffer->bo, firstQuery * pool->stride,
                        dst_buffer->offset + dstOffset, pool->stride, stride, queryCount, flags,
                        pool->pipeline_stats_mask, pool->availability_offset + 4 * firstQuery);
      break;
   case VK_QUERY_TYPE_TIMESTAMP:
      if (flags & VK_QUERY_RESULT_WAIT_BIT) {
         for (unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
            unsigned query = firstQuery + i;
            uint64_t local_src_va = va + query * pool->stride;

            radeon_check_space(cmd_buffer->device->ws, cs, 7);

            /* Wait on the high 32 bits of the timestamp in
             * case the low part is 0xffffffff.
             */
            radv_cp_wait_mem(cs, WAIT_REG_MEM_NOT_EQUAL, local_src_va + 4,
                             TIMESTAMP_NOT_READY >> 32, 0xffffffff);
         }
      }

      radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.timestamp_query_pipeline,
                        pool->bo, dst_buffer->bo, firstQuery * pool->stride,
                        dst_buffer->offset + dstOffset, pool->stride, stride, queryCount, flags, 0,
                        0);
      break;
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
      if (flags & VK_QUERY_RESULT_WAIT_BIT) {
         for (unsigned i = 0; i < queryCount; i++) {
            unsigned query = firstQuery + i;
            uint64_t src_va = va + query * pool->stride;

            radeon_check_space(cmd_buffer->device->ws, cs, 7 * 4);

            /* Wait on the upper word of all results. */
            for (unsigned j = 0; j < 4; j++, src_va += 8) {
               radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL, src_va + 4, 0x80000000,
                                0xffffffff);
            }
         }
      }

      radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.tfb_query_pipeline,
                        pool->bo, dst_buffer->bo, firstQuery * pool->stride,
                        dst_buffer->offset + dstOffset, pool->stride, stride, queryCount, flags, 0,
                        0);
      break;
   default:
      unreachable("trying to get results of unhandled query type");
   }
}

void
radv_CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                       uint32_t queryCount)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
   uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP ? (uint32_t)TIMESTAMP_NOT_READY : 0;
   uint32_t flush_bits = 0;

   /* Make sure to sync all previous work if the given command buffer has
    * pending active queries. Otherwise the GPU might write query data
    * after the reset operation.
    */
   cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;

   flush_bits |= radv_fill_buffer(cmd_buffer, NULL, pool->bo, firstQuery * pool->stride,
                                  queryCount * pool->stride, value);

   if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
      flush_bits |= radv_fill_buffer(cmd_buffer, NULL, pool->bo,
                                     pool->availability_offset + firstQuery * 4, queryCount * 4, 0);
   }

   if (flush_bits) {
      /* Only need to flush caches for the compute shader path. */
      cmd_buffer->pending_reset_query = true;
      cmd_buffer->state.flush_bits |= flush_bits;
   }
}

void
radv_ResetQueryPool(VkDevice _device, VkQueryPool queryPool, uint32_t firstQuery,
                    uint32_t queryCount)
{
   RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);

   uint32_t value = pool->type == VK_QUERY_TYPE_TIMESTAMP ? (uint32_t)TIMESTAMP_NOT_READY : 0;
   uint32_t *data = (uint32_t *)(pool->ptr + firstQuery * pool->stride);
   uint32_t *data_end = (uint32_t *)(pool->ptr + (firstQuery + queryCount) * pool->stride);

   for (uint32_t *p = data; p != data_end; ++p)
      *p = value;

   if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
      memset(pool->ptr + pool->availability_offset + firstQuery * 4, 0, queryCount * 4);
   }
}

static unsigned
event_type_for_stream(unsigned stream)
{
   switch (stream) {
   default:
   case 0:
      return V_028A90_SAMPLE_STREAMOUTSTATS;
   case 1:
      return V_028A90_SAMPLE_STREAMOUTSTATS1;
   case 2:
      return V_028A90_SAMPLE_STREAMOUTSTATS2;
   case 3:
      return V_028A90_SAMPLE_STREAMOUTSTATS3;
   }
}
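
/* Emits the "begin" snapshot for a query: ZPASS_DONE for occlusion (the
 * matching end sample is written at va + 8 by emit_end_query),
 * SAMPLE_PIPELINESTAT for pipeline statistics (end at va +
 * pipelinestat_block_size) and SAMPLE_STREAMOUTSTATS* for transform feedback
 * (end at va + 16). */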
static void
emit_begin_query(struct radv_cmd_buffer *cmd_buffer, struct radv_query_pool *pool, uint64_t va,
                 VkQueryType query_type, VkQueryControlFlags flags, uint32_t index)
{
   struct radeon_cmdbuf *cs = cmd_buffer->cs;
   switch (query_type) {
   case VK_QUERY_TYPE_OCCLUSION:
      radeon_check_space(cmd_buffer->device->ws, cs, 7);

      ++cmd_buffer->state.active_occlusion_queries;
      if (cmd_buffer->state.active_occlusion_queries == 1) {
         if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
            /* This is the first occlusion query, enable
             * the hint if the precision bit is set.
             */
            cmd_buffer->state.perfect_occlusion_queries_enabled = true;
         }

         radv_set_db_count_control(cmd_buffer);
      } else {
         if ((flags & VK_QUERY_CONTROL_PRECISE_BIT) &&
             !cmd_buffer->state.perfect_occlusion_queries_enabled) {
            /* This is not the first query, but this one
             * needs to enable precision, DB_COUNT_CONTROL
             * has to be updated accordingly.
             */
            cmd_buffer->state.perfect_occlusion_queries_enabled = true;

            radv_set_db_count_control(cmd_buffer);
         }
      }

      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      radeon_check_space(cmd_buffer->device->ws, cs, 4);

      ++cmd_buffer->state.active_pipeline_queries;
      if (cmd_buffer->state.active_pipeline_queries == 1) {
         cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_STOP_PIPELINE_STATS;
         cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_START_PIPELINE_STATS;
      }

      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);

      if (radv_query_pool_needs_gds(cmd_buffer->device, pool)) {
         int idx = radv_get_pipeline_statistics_index(
            VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT);

         /* Make sure GDS is idle before copying the value. */
         cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_INV_L2;
         si_emit_cache_flush(cmd_buffer);

         va += 8 * idx;

         radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
         radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_GDS) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
                            COPY_DATA_WR_CONFIRM);
         radeon_emit(cs, 0);
         radeon_emit(cs, 0);
         radeon_emit(cs, va);
         radeon_emit(cs, va >> 32);

         /* Record that the command buffer needs GDS. */
         cmd_buffer->gds_needed = true;

         cmd_buffer->state.active_pipeline_gds_queries++;
      }
      break;
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
      radeon_check_space(cmd_buffer->device->ws, cs, 4);

      assert(index < MAX_SO_STREAMS);

      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      break;
   default:
      unreachable("beginning unhandled query type");
   }
}

static void
emit_end_query(struct radv_cmd_buffer *cmd_buffer, struct radv_query_pool *pool, uint64_t va,
               uint64_t avail_va, VkQueryType query_type, uint32_t index)
{
   struct radeon_cmdbuf *cs = cmd_buffer->cs;
   switch (query_type) {
   case VK_QUERY_TYPE_OCCLUSION:
      radeon_check_space(cmd_buffer->device->ws, cs, 14);

      cmd_buffer->state.active_occlusion_queries--;
      if (cmd_buffer->state.active_occlusion_queries == 0) {
         radv_set_db_count_control(cmd_buffer);

         /* Reset the perfect occlusion queries hint now that no
          * queries are active.
          */
         cmd_buffer->state.perfect_occlusion_queries_enabled = false;
      }

      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
      radeon_emit(cs, va + 8);
      radeon_emit(cs, (va + 8) >> 32);

      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      radeon_check_space(cmd_buffer->device->ws, cs, 16);

      cmd_buffer->state.active_pipeline_queries--;
      if (cmd_buffer->state.active_pipeline_queries == 0) {
         cmd_buffer->state.flush_bits &= ~RADV_CMD_FLAG_START_PIPELINE_STATS;
         cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_STOP_PIPELINE_STATS;
      }
      va += pipelinestat_block_size;

      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);

      si_cs_emit_write_event_eop(cs, cmd_buffer->device->physical_device->rad_info.chip_class,
                                 radv_cmd_buffer_uses_mec(cmd_buffer), V_028A90_BOTTOM_OF_PIPE_TS,
                                 0, EOP_DST_SEL_MEM, EOP_DATA_SEL_VALUE_32BIT, avail_va, 1,
                                 cmd_buffer->gfx9_eop_bug_va);

      if (radv_query_pool_needs_gds(cmd_buffer->device, pool)) {
         int idx = radv_get_pipeline_statistics_index(
            VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT);

         /* Make sure GDS is idle before copying the value. */
         cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_INV_L2;
         si_emit_cache_flush(cmd_buffer);

         va += 8 * idx;

         radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
         radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_GDS) | COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
                            COPY_DATA_WR_CONFIRM);
         radeon_emit(cs, 0);
         radeon_emit(cs, 0);
         radeon_emit(cs, va);
         radeon_emit(cs, va >> 32);

         cmd_buffer->state.active_pipeline_gds_queries--;
      }
      break;
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
      radeon_check_space(cmd_buffer->device->ws, cs, 4);

      assert(index < MAX_SO_STREAMS);

      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(event_type_for_stream(index)) | EVENT_INDEX(3));
      radeon_emit(cs, (va + 16));
      radeon_emit(cs, (va + 16) >> 32);
      break;
   default:
      unreachable("ending unhandled query type");
   }

   cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
                                          RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_INV_L2 |
                                          RADV_CMD_FLAG_INV_VCACHE;
   if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
      cmd_buffer->active_query_flush_bits |=
         RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB;
   }
}

void
radv_CmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
                             VkQueryControlFlags flags, uint32_t index)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
   struct radeon_cmdbuf *cs = cmd_buffer->cs;
   uint64_t va = radv_buffer_get_va(pool->bo);

   radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

   emit_query_flush(cmd_buffer, pool);

   va += pool->stride * query;

   emit_begin_query(cmd_buffer, pool, va, pool->type, flags, index);
}

void
radv_CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
                   VkQueryControlFlags flags)
{
   radv_CmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, 0);
}

void
radv_CmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
                           uint32_t index)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
   uint64_t va = radv_buffer_get_va(pool->bo);
   uint64_t avail_va = va + pool->availability_offset + 4 * query;
   va += pool->stride * query;

   /* Do not need to add the pool BO to the list because the query must
    * currently be active, which means the BO is already in the list.
    */
   emit_end_query(cmd_buffer, pool, va, avail_va, pool->type, index);

   /*
    * For multiview we have to emit a query for each bit in the mask,
    * however the first query we emit will get the totals for all the
    * operations, so we don't want to get a real value in the other
    * queries. This emits a fake begin/end sequence so the waiting
    * code gets a completed query value and doesn't hang, but the
    * query returns 0.
    */
   if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
      for (unsigned i = 1; i < util_bitcount(cmd_buffer->state.subpass->view_mask); i++) {
         va += pool->stride;
         avail_va += 4;
         emit_begin_query(cmd_buffer, pool, va, pool->type, 0, 0);
         emit_end_query(cmd_buffer, pool, va, avail_va, pool->type, 0);
      }
   }
}

void
radv_CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query)
{
   radv_CmdEndQueryIndexedEXT(commandBuffer, queryPool, query, 0);
}
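
/* TOP_OF_PIPE timestamps need not wait for earlier work, so the GPU clock is
 * copied to the query slot immediately with COPY_DATA; any other stage falls
 * back to a bottom-of-pipe EOP event that writes the timestamp only after
 * preceding work has finished. */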
void
radv_CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                       VkQueryPool queryPool, uint32_t query)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
   bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
   struct radeon_cmdbuf *cs = cmd_buffer->cs;
   uint64_t va = radv_buffer_get_va(pool->bo);
   uint64_t query_va = va + pool->stride * query;

   radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

   emit_query_flush(cmd_buffer, pool);

   int num_queries = 1;
   if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask)
      num_queries = util_bitcount(cmd_buffer->state.subpass->view_mask);

   ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28 * num_queries);

   for (unsigned i = 0; i < num_queries; i++) {
      switch (pipelineStage) {
      case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
         radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
         radeon_emit(cs, COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM |
                            COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) | COPY_DATA_DST_SEL(V_370_MEM));
         radeon_emit(cs, 0);
         radeon_emit(cs, 0);
         radeon_emit(cs, query_va);
         radeon_emit(cs, query_va >> 32);
         break;
      default:
         si_cs_emit_write_event_eop(cs, cmd_buffer->device->physical_device->rad_info.chip_class,
                                    mec, V_028A90_BOTTOM_OF_PIPE_TS, 0, EOP_DST_SEL_MEM,
                                    EOP_DATA_SEL_TIMESTAMP, query_va, 0,
                                    cmd_buffer->gfx9_eop_bug_va);
         break;
      }
      query_va += pool->stride;
   }

   cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
                                          RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_INV_L2 |
                                          RADV_CMD_FLAG_INV_VCACHE;
   if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
      cmd_buffer->active_query_flush_bits |=
         RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB;
   }

   assert(cmd_buffer->cs->cdw <= cdw_max);
}