GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/r600/r600_query.c
/*
 * Copyright 2010 Jerome Glisse <[email protected]>
 * Copyright 2014 Marek Olšák <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_pipe.h"
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/os_time.h"
#include "tgsi/tgsi_text.h"

#define R600_MAX_STREAMS 4

struct r600_hw_query_params {
	unsigned start_offset;
	unsigned end_offset;
	unsigned fence_offset;
	unsigned pair_stride;
	unsigned pair_count;
};

/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;

	uint64_t begin_time;
	uint64_t end_time;

	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};

static void r600_query_sw_destroy(struct r600_common_screen *rscreen,
				  struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	rscreen->b.fence_reference(&rscreen->b, &query->fence, NULL);
	FREE(query);
}

static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
	case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
	case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
	case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
	case R600_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	case R600_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
	default: unreachable("query type does not correspond to winsys id");
	}
}

static bool r600_query_sw_begin(struct r600_common_context *rctx,
				struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_DECOMPRESS_CALLS:
		query->begin_result = rctx->num_decompress_calls;
		break;
	case R600_QUERY_MRT_DRAW_CALLS:
		query->begin_result = rctx->num_mrt_draw_calls;
		break;
	case R600_QUERY_PRIM_RESTART_CALLS:
		query->begin_result = rctx->num_prim_restart_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->begin_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->begin_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->begin_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->begin_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->begin_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->begin_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_CB_CACHE_FLUSHES:
		query->begin_result = rctx->num_cb_cache_flushes;
		break;
	case R600_QUERY_NUM_DB_CACHE_FLUSHES:
		query->begin_result = rctx->num_db_cache_flushes;
		break;
	case R600_QUERY_NUM_RESIDENT_HANDLES:
		query->begin_result = rctx->num_resident_handles;
		break;
	case R600_QUERY_TC_OFFLOADED_SLOTS:
		query->begin_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
		break;
	case R600_QUERY_TC_DIRECT_SLOTS:
		query->begin_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
		break;
	case R600_QUERY_TC_NUM_SYNCS:
		query->begin_result = rctx->tc ? rctx->tc->num_syncs : 0;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS:
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->begin_time = rctx->ws->query_value(rctx->ws,
							  RADEON_NUM_GFX_IBS);
		break;
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->begin_time = os_time_get_nano();
		break;
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		query->begin_result =
			rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
		query->begin_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_CP_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
		query->begin_result = r600_begin_counter(rctx->screen,
							 query->b.type);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->begin_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return true;
}

static bool r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_DECOMPRESS_CALLS:
		query->end_result = rctx->num_decompress_calls;
		break;
	case R600_QUERY_MRT_DRAW_CALLS:
		query->end_result = rctx->num_mrt_draw_calls;
		break;
	case R600_QUERY_PRIM_RESTART_CALLS:
		query->end_result = rctx->num_prim_restart_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->end_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->end_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->end_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->end_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->end_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->end_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->end_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_CB_CACHE_FLUSHES:
		query->end_result = rctx->num_cb_cache_flushes;
		break;
	case R600_QUERY_NUM_DB_CACHE_FLUSHES:
		query->end_result = rctx->num_db_cache_flushes;
		break;
	case R600_QUERY_NUM_RESIDENT_HANDLES:
		query->end_result = rctx->num_resident_handles;
		break;
	case R600_QUERY_TC_OFFLOADED_SLOTS:
		query->end_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
		break;
	case R600_QUERY_TC_DIRECT_SLOTS:
		query->end_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
		break;
	case R600_QUERY_TC_NUM_SYNCS:
		query->end_result = rctx->tc ? rctx->tc->num_syncs : 0;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS:
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->end_time = rctx->ws->query_value(rctx->ws,
							RADEON_NUM_GFX_IBS);
		break;
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->end_time = os_time_get_nano();
		break;
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		query->end_result =
			rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
		query->end_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_CP_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
		query->end_result = r600_end_counter(rctx->screen,
						     query->b.type,
						     query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->end_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}

static bool r600_query_sw_get_result(struct r600_common_context *rctx,
				     struct r600_query *rquery,
				     bool wait,
				     union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		struct pipe_context *ctx = rquery->b.flushed ? NULL : &rctx->b;

		result->b = screen->fence_finish(screen, ctx, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_GFX_BO_LIST_SIZE:
		result->u64 = (query->end_result - query->begin_result) /
			      (query->end_time - query->begin_time);
		return true;
	case R600_QUERY_CS_THREAD_BUSY:
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		result->u64 = (query->end_result - query->begin_result) * 100 /
			      (query->end_time - query->begin_time);
		return true;
	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return true;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.max_render_backends;
		return true;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return true;
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return true;
}
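
/* Worked example for the ratio queries above (values are illustrative):
 * for R600_QUERY_CS_THREAD_BUSY, begin/end_result sample the winsys CS
 * thread time and begin/end_time sample os_time_get_nano(), so 5 ms of
 * thread time over a 20 ms window yields (5000000 * 100) / 20000000 = 25,
 * i.e. 25% busy.
 */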


static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result,
	.get_result_resource = NULL
};

static struct pipe_query *r600_query_sw_create(unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}

void r600_query_hw_destroy(struct r600_common_screen *rscreen,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	r600_resource_reference(&query->buffer.buf, NULL);
	FREE(rquery);
}

static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
						   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size,
				 rscreen->info.min_alloc_size);

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(&rscreen->b, 0,
				   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (!query->ops->prepare_buffer(rscreen, query, buf)) {
		r600_resource_reference(&buf, NULL);
		return NULL;
	}

	return buf;
}

static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = rscreen->ws->buffer_map(rscreen->ws, buffer->buf, NULL,
						    PIPE_MAP_WRITE |
						    PIPE_MAP_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
		unsigned max_rbs = rscreen->info.max_render_backends;
		unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = buffer->b.b.width0 / query->result_size;
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < max_rbs; i++) {
				if (!(enabled_rb_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * max_rbs;
		}
	}

	return true;
}
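
/* Example of the masking above (RB count and mask are illustrative):
 * with max_rbs = 4 and enabled_rb_mask = 0xb, RB 2 is disabled, so
 * results[9] and results[11] (the high dwords of its begin/end counters)
 * are pre-set to 0x80000000 in every sample. r600_query_read_result()
 * then treats the pair as valid and it contributes end - start = 0. */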

static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset);

static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
	.get_result_resource = r600_query_hw_get_result_resource,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};

bool r600_query_hw_init(struct r600_common_screen *rscreen,
			struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rscreen, query);
	if (!query->buffer.buf)
		return false;

	return true;
}

static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		query->result_size = 16 * rscreen->info.max_render_backends;
		query->result_size += 16; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 24;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 16;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32 * R600_MAX_STREAMS;
		query->num_cs_dw_begin = 6 * R600_MAX_STREAMS;
		query->num_cs_dw_end = 6 * R600_MAX_STREAMS;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->result_size += 8; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rscreen, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}
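
/* Sizing example for the switch above: an occlusion query on a chip with
 * 8 render backends stores a 16-byte begin/end pair per RB plus 16 bytes
 * for the fence, i.e. result_size = 16 * 8 + 16 = 144 bytes per snapshot. */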

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			struct r600_context *ctx = (struct r600_context*)rctx;
			r600_mark_atom_dirty(ctx, &ctx->db_misc_state.atom);
		}
	}
}

static unsigned event_type_for_stream(unsigned stream)
{
	switch (stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

static void emit_sample_streamout(struct radeon_cmdbuf *cs, uint64_t va,
				  unsigned stream)
{
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
	radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
}

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_cmdbuf *cs = &ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		emit_sample_streamout(cs, va, query->stream);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
			emit_sample_streamout(cs, va + 32 * stream, stream);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		/* Write the timestamp after the last draw is done.
		 * (bottom-of-pipe)
		 */
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, EOP_DATA_SEL_TIMESTAMP,
					 NULL, va, 0, query->b.type);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}

static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       true);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}

static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_cmdbuf *cs = &ctx->gfx.cs;
	uint64_t fence_va = 0;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + ctx->screen->info.max_render_backends * 16 - 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += 16;
		emit_sample_streamout(cs, va, query->stream);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		va += 16;
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
			emit_sample_streamout(cs, va + 32 * stream, stream);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += 8;
		FALLTHROUGH;
	case PIPE_QUERY_TIMESTAMP:
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, EOP_DATA_SEL_TIMESTAMP, NULL, va,
					 0, query->b.type);
		fence_va = va + 8;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS: {
		unsigned sample_size = (query->result_size - 8) / 2;

		va += sample_size;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + sample_size;
		break;
	}
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	if (fence_va)
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0,
					 EOP_DATA_SEL_VALUE_32BIT,
					 query->buffer.buf, fence_va, 0x80000000,
					 query->b.type);
}

static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* The queries which need begin already called this in begin_query. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}

static void emit_set_predicate(struct r600_common_context *ctx,
			       struct r600_resource *buf, uint64_t va,
			       uint32_t op)
{
	struct radeon_cmdbuf *cs = &ctx->gfx.cs;

	radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
	radeon_emit(cs, va);
	radeon_emit(cs, op | ((va >> 32) & 0xFF));
	r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_READ,
			RADEON_PRIO_QUERY);
}

static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait, invert;

	if (!query)
		return;

	invert = ctx->render_cond_invert;
	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		invert = !invert;
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va_base = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			uint64_t va = va_base + results_base;

			if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
				for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
					emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);

					/* set CONTINUE bit for all packets except the first */
					op |= PREDICATION_CONTINUE;
				}
			} else {
				emit_set_predicate(ctx, qbuf->buf, va, op);
				op |= PREDICATION_CONTINUE;
			}

			results_base += query->result_size;
		}
	}
}

static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_screen *rscreen =
		(struct r600_common_screen *)ctx->screen;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(query_type);

	return r600_query_hw_create(rscreen, query_type, index);
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx->screen, rquery);
}

static bool r600_begin_query(struct pipe_context *ctx,
			     struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}

void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
				 struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(rctx->ws, query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		r600_resource_reference(&query->buffer.buf, NULL);
		query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
	} else {
		if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
			r600_resource_reference(&query->buffer.buf, NULL);
	}
}

bool r600_query_hw_begin(struct r600_common_context *rctx,
			 struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	list_addtail(&query->list, &rctx->active_queries);
	return true;
}

static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}

bool r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		list_delinit(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}

static void r600_get_hw_query_params(struct r600_common_context *rctx,
				     struct r600_query_hw *rquery, int index,
				     struct r600_hw_query_params *params)
{
	unsigned max_rbs = rctx->screen->info.max_render_backends;

	params->pair_stride = 0;
	params->pair_count = 1;

	switch (rquery->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = max_rbs * 16;
		params->pair_stride = 16;
		params->pair_count = max_rbs;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = 16;
		break;
	case PIPE_QUERY_TIMESTAMP:
		params->start_offset = 0;
		params->end_offset = 0;
		params->fence_offset = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		params->start_offset = 8;
		params->end_offset = 24;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		params->start_offset = 0;
		params->end_offset = 16;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		params->start_offset = 8 - index * 8;
		params->end_offset = 24 - index * 8;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		params->pair_count = R600_MAX_STREAMS;
		params->pair_stride = 32;
		FALLTHROUGH;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		params->start_offset = 0;
		params->end_offset = 16;

		/* We can re-use the high dword of the last 64-bit value as a
		 * fence: it is initialized as 0, and the high bit is set by
		 * the write of the streamout stats event.
		 */
		params->fence_offset = rquery->result_size - 4;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
	{
		/* Offsets apply to EG+ */
		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
		params->start_offset = offsets[index];
		params->end_offset = 88 + offsets[index];
		params->fence_offset = 2 * 88;
		break;
	}
	default:
		unreachable("r600_get_hw_query_params unsupported");
	}
}
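
/* Example of the parameters above: PIPE_QUERY_TIME_ELAPSED is laid out as
 * { u64 begin_ts; u64 end_ts; u64 fence; }, hence start_offset = 0,
 * end_offset = 8 and fence_offset = 16, matching the 24-byte result_size
 * chosen in r600_query_hw_create(). */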

static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}
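
/* Usage example: a per-RB occlusion sample written by ZPASS_DONE is read as
 *    r600_query_read_result(buffer + i * 16, 0, 2, true);
 * dwords 0-1 hold the begin counter, dwords 2-3 the end counter, and bit 31
 * of each high dword (the status bit tested above) is set once the GPU has
 * actually written the value. */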

static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	unsigned max_rbs = rscreen->info.max_render_backends;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
		result->u64 = *(uint64_t*)buffer;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
			result->b = result->b ||
				r600_query_read_result(buffer, 2, 6, true) !=
				r600_query_read_result(buffer, 0, 4, true);
			buffer = (char *)buffer + 32;
		}
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (rscreen->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}

static bool r600_get_query_result(struct pipe_context *ctx,
				  struct pipe_query *query, bool wait,
				  union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}

static void r600_get_query_result_resource(struct pipe_context *ctx,
					   struct pipe_query *query,
					   bool wait,
					   enum pipe_query_value_type result_type,
					   int index,
					   struct pipe_resource *resource,
					   unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
					 resource, offset);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}

bool r600_query_hw_get_result(struct r600_common_context *rctx,
			      struct r600_query *rquery,
			      bool wait, union pipe_query_result *result)
{
	struct r600_common_screen *rscreen = rctx->screen;
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned usage = PIPE_MAP_READ |
				 (wait ? 0 : PIPE_MAP_DONTBLOCK);
		unsigned results_base = 0;
		void *map;

		if (rquery->b.flushed)
			map = rctx->ws->buffer_map(rctx->ws, qbuf->buf->buf, NULL, usage);
		else
			map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf, usage);

		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rscreen, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rscreen->info.clock_crystal_freq;
	}
	return true;
}
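
/* Conversion example: clock_crystal_freq is in kHz, so a raw delta of
 * 27000 ticks on a 27 MHz (27000 kHz) crystal becomes
 * (1000000 * 27000) / 27000 = 1000000 ns, i.e. 1 ms. */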

/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *        256: SO_OVERFLOW mode: take the difference of two successive half-pairs
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *  1.w = result_offset
 *  2.x = buffer0 offset
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
static void r600_create_query_result_shader(struct r600_common_context *rctx)
{
	/* TEMP[0].xy = accumulated result so far
	 * TEMP[0].z = result not available
	 *
	 * TEMP[1].x = current result index
	 * TEMP[1].y = current pair index
	 */
	static const char text_tmpl[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL BUFFER[0]\n"
		"DCL BUFFER[1]\n"
		"DCL BUFFER[2]\n"
		"DCL CONST[0][0..2]\n"
		"DCL TEMP[0..5]\n"
		"IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
		"IMM[1] UINT32 {1, 2, 4, 8}\n"
		"IMM[2] UINT32 {16, 32, 64, 128}\n"
		"IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
		"IMM[4] UINT32 {256, 0, 0, 0}\n"

		"AND TEMP[5], CONST[0][0].wwww, IMM[2].xxxx\n"
		"UIF TEMP[5]\n"
			/* Check result availability. */
			"UADD TEMP[1].x, CONST[0][1].xxxx, CONST[0][2].xxxx\n"
			"LOAD TEMP[1].x, BUFFER[0], TEMP[1].xxxx\n"
			"ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
			"MOV TEMP[1], TEMP[0].zzzz\n"
			"NOT TEMP[0].z, TEMP[0].zzzz\n"

			/* Load result if available. */
			"UIF TEMP[1]\n"
				"UADD TEMP[0].x, IMM[0].xxxx, CONST[0][2].xxxx\n"
				"LOAD TEMP[0].xy, BUFFER[0], TEMP[0].xxxx\n"
			"ENDIF\n"
		"ELSE\n"
			/* Load previously accumulated result if requested. */
			"MOV TEMP[0], IMM[0].xxxx\n"
			"AND TEMP[4], CONST[0][0].wwww, IMM[1].xxxx\n"
			"UIF TEMP[4]\n"
				"LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
			"ENDIF\n"

			"MOV TEMP[1].x, IMM[0].xxxx\n"
			"BGNLOOP\n"
				/* Break if accumulated result so far is not available. */
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				/* Break if result_index >= result_count. */
				"USGE TEMP[5], TEMP[1].xxxx, CONST[0][0].zzzz\n"
				"UIF TEMP[5]\n"
					"BRK\n"
				"ENDIF\n"

				/* Load fence and check result availability */
				"UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy, CONST[0][1].xxxx\n"
				"UADD TEMP[5].x, TEMP[5].xxxx, CONST[0][2].xxxx\n"
				"LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
				"ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
				"NOT TEMP[0].z, TEMP[0].zzzz\n"
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				"MOV TEMP[1].y, IMM[0].xxxx\n"
				"BGNLOOP\n"
					/* Load start and end. */
					"UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy\n"
					"UMAD TEMP[5].x, TEMP[1].yyyy, CONST[0][1].yyyy, TEMP[5].xxxx\n"
					"UADD TEMP[5].x, TEMP[5].xxxx, CONST[0][2].xxxx\n"
					"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

					"UADD TEMP[5].y, TEMP[5].xxxx, CONST[0][0].xxxx\n"
					"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

					"U64ADD TEMP[4].xy, TEMP[3], -TEMP[2]\n"

					"AND TEMP[5].z, CONST[0][0].wwww, IMM[4].xxxx\n"
					"UIF TEMP[5].zzzz\n"
						/* Load second start/end half-pair and
						 * take the difference
						 */
						"UADD TEMP[5].xy, TEMP[5], IMM[1].wwww\n"
						"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
						"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

						"U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
						"U64ADD TEMP[4].xy, TEMP[4], -TEMP[3]\n"
					"ENDIF\n"

					"U64ADD TEMP[0].xy, TEMP[0], TEMP[4]\n"

					/* Increment pair index */
					"UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
					"USGE TEMP[5], TEMP[1].yyyy, CONST[0][1].zzzz\n"
					"UIF TEMP[5]\n"
						"BRK\n"
					"ENDIF\n"
				"ENDLOOP\n"

				/* Increment result index */
				"UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
			"ENDLOOP\n"
		"ENDIF\n"

		"AND TEMP[4], CONST[0][0].wwww, IMM[1].yyyy\n"
		"UIF TEMP[4]\n"
			/* Store accumulated data for chaining. */
			"STORE BUFFER[2].xyz, CONST[0][1].wwww, TEMP[0]\n"
		"ELSE\n"
			"AND TEMP[4], CONST[0][0].wwww, IMM[1].zzzz\n"
			"UIF TEMP[4]\n"
				/* Store result availability. */
				"NOT TEMP[0].z, TEMP[0]\n"
				"AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
				"STORE BUFFER[2].x, CONST[0][1].wwww, TEMP[0].zzzz\n"

				"AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
				"UIF TEMP[4]\n"
					"STORE BUFFER[2].y, CONST[0][1].wwww, IMM[0].xxxx\n"
				"ENDIF\n"
			"ELSE\n"
				/* Store result if it is available. */
				"NOT TEMP[4], TEMP[0].zzzz\n"
				"UIF TEMP[4]\n"
					/* Apply timestamp conversion */
					"AND TEMP[4], CONST[0][0].wwww, IMM[2].yyyy\n"
					"UIF TEMP[4]\n"
						"U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
						"U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
					"ENDIF\n"

					/* Convert to boolean */
					"AND TEMP[4], CONST[0][0].wwww, IMM[1].wwww\n"
					"UIF TEMP[4]\n"
						"U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[4].zwzw\n"
						"AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
						"MOV TEMP[0].y, IMM[0].xxxx\n"
					"ENDIF\n"

					"AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
					"UIF TEMP[4]\n"
						"STORE BUFFER[2].xy, CONST[0][1].wwww, TEMP[0].xyxy\n"
					"ELSE\n"
						/* Clamping */
						"UIF TEMP[0].yyyy\n"
							"MOV TEMP[0].x, IMM[0].wwww\n"
						"ENDIF\n"

						"AND TEMP[4], CONST[0][0].wwww, IMM[2].wwww\n"
						"UIF TEMP[4]\n"
							"UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
						"ENDIF\n"

						"STORE BUFFER[2].x, CONST[0][1].wwww, TEMP[0].xxxx\n"
					"ENDIF\n"
				"ENDIF\n"
			"ENDIF\n"
		"ENDIF\n"

		"END\n";

	char text[sizeof(text_tmpl) + 32];
	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {};

	/* Hard code the frequency into the shader so that the backend can
	 * use the full range of optimizations for divide-by-constant.
	 */
	snprintf(text, sizeof(text), text_tmpl,
		 rctx->screen->info.clock_crystal_freq);

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
}
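
/* Example of the CONST 0.w config word documented above: resolving a
 * PIPE_QUERY_SO_OVERFLOW_PREDICATE to a 64-bit value sets
 * 8 (boolean) | 256 (half-pair difference) | 64 (64-bit store), exactly
 * as assembled in r600_query_hw_get_result_resource() below. */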

static void r600_restore_qbo_state(struct r600_common_context *rctx,
				   struct r600_qbo_state *st)
{
	rctx->b.bind_compute_state(&rctx->b, st->saved_compute);
	rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, true, &st->saved_const0);
	rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo, ~0);
	for (unsigned i = 0; i < 3; ++i)
		pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}
1599
1600
static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
1601
struct r600_query *rquery,
1602
bool wait,
1603
enum pipe_query_value_type result_type,
1604
int index,
1605
struct pipe_resource *resource,
1606
unsigned offset)
1607
{
1608
struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1609
struct r600_query_buffer *qbuf;
1610
struct r600_query_buffer *qbuf_prev;
1611
struct pipe_resource *tmp_buffer = NULL;
1612
unsigned tmp_buffer_offset = 0;
1613
struct r600_qbo_state saved_state = {};
1614
struct pipe_grid_info grid = {};
1615
struct pipe_constant_buffer constant_buffer = {};
1616
struct pipe_shader_buffer ssbo[3];
1617
struct r600_hw_query_params params;
1618
struct {
1619
uint32_t end_offset;
1620
uint32_t result_stride;
1621
uint32_t result_count;
1622
uint32_t config;
1623
uint32_t fence_offset;
1624
uint32_t pair_stride;
1625
uint32_t pair_count;
1626
uint32_t buffer_offset;
1627
uint32_t buffer0_offset;
1628
} consts;
1629
1630
if (!rctx->query_result_shader) {
1631
r600_create_query_result_shader(rctx);
1632
if (!rctx->query_result_shader)
1633
return;
1634
}
1635
1636
if (query->buffer.previous) {
1637
u_suballocator_alloc(&rctx->allocator_zeroed_memory, 16, 256,
1638
&tmp_buffer_offset, &tmp_buffer);
1639
if (!tmp_buffer)
1640
return;
1641
}
1642
1643
rctx->save_qbo_state(&rctx->b, &saved_state);
1644
1645
r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
1646
consts.end_offset = params.end_offset - params.start_offset;
1647
consts.fence_offset = params.fence_offset - params.start_offset;
1648
consts.result_stride = query->result_size;
1649
consts.pair_stride = params.pair_stride;
1650
consts.pair_count = params.pair_count;
1651
1652
constant_buffer.buffer_size = sizeof(consts);
1653
constant_buffer.user_buffer = &consts;
1654
1655
ssbo[1].buffer = tmp_buffer;
1656
ssbo[1].buffer_offset = tmp_buffer_offset;
1657
ssbo[1].buffer_size = 16;
1658
1659
ssbo[2] = ssbo[1];
1660
1661
rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);
1662
1663
grid.block[0] = 1;
1664
grid.block[1] = 1;
1665
grid.block[2] = 1;
1666
grid.grid[0] = 1;
1667
grid.grid[1] = 1;
1668
grid.grid[2] = 1;
1669
1670
	consts.config = 0;
	if (index < 0)
		consts.config |= 4;
	if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE)
		consts.config |= 8;
	else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
		 query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
		consts.config |= 8 | 256;
	else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
		 query->b.type == PIPE_QUERY_TIME_ELAPSED)
		consts.config |= 32;

	switch (result_type) {
	case PIPE_QUERY_TYPE_U64:
	case PIPE_QUERY_TYPE_I64:
		consts.config |= 64;
		break;
	case PIPE_QUERY_TYPE_I32:
		consts.config |= 128;
		break;
	case PIPE_QUERY_TYPE_U32:
		break;
	}

	rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;

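	/* Walk the buffer chain from the newest buffer to the oldest.
	 * Intermediate dispatches accumulate into the scratch buffer; only
	 * the pass for the oldest buffer (qbuf->previous == NULL) writes to
	 * the destination resource. */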
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
		if (query->b.type != PIPE_QUERY_TIMESTAMP) {
			qbuf_prev = qbuf->previous;
			consts.result_count = qbuf->results_end / query->result_size;
			consts.config &= ~3;
			if (qbuf != &query->buffer)
				consts.config |= 1;
			if (qbuf->previous)
				consts.config |= 2;
		} else {
			/* Only read the last timestamp. */
			qbuf_prev = NULL;
			consts.result_count = 0;
			consts.config |= 16;
			params.start_offset += qbuf->results_end - query->result_size;
		}

		ssbo[0].buffer = &qbuf->buf->b.b;
		ssbo[0].buffer_offset = params.start_offset & ~0xff;
		ssbo[0].buffer_size = qbuf->results_end - ssbo[0].buffer_offset;
		consts.buffer0_offset = (params.start_offset & 0xff);
		if (!qbuf->previous) {
			ssbo[2].buffer = resource;
			ssbo[2].buffer_offset = offset & ~0xff;
			ssbo[2].buffer_size = offset + 8;
			consts.buffer_offset = (offset & 0xff);
		} else
			consts.buffer_offset = 0;

		rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, false, &constant_buffer);

		rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo, ~0);

		if (wait && qbuf == &query->buffer) {
			uint64_t va;

			/* Wait for result availability. Wait only for readiness
			 * of the last entry, since the fence writes should be
			 * serialized in the CP.
			 */
			va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
			va += params.fence_offset;

			r600_gfx_wait_fence(rctx, qbuf->buf, va, 0x80000000, 0x80000000);
		}

		rctx->b.launch_grid(&rctx->b, &grid);
		rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
	}

	r600_restore_qbo_state(rctx, &saved_state);
	pipe_resource_reference(&tmp_buffer, NULL);
}

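/* Each results pair needs one 5-dword SET_PREDICATION packet, and
 * SO_OVERFLOW_ANY repeats the packets for every stream. */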
static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  bool condition,
				  enum pipe_render_cond_flag mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;

		if (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
			atom->num_dw *= R600_MAX_STREAMS;
	}

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

void r600_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}

/* Fix radeon_info::enabled_rb_mask for R600, R700, EVERGREEN, NI. */
void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
{
	struct r600_common_context *ctx =
		(struct r600_common_context*)rscreen->aux_context;
	struct radeon_cmdbuf *cs = &ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned i, mask = 0;
	unsigned max_rbs;

	if (ctx->family == CHIP_JUNIPER) {
		/*
		 * Fix for predication lockups: the chip can only ever have
		 * 4 RBs, but the predication logic appears to assume there
		 * are 8, reading results from query buffers that are never
		 * written. By raising this number we write the status bit
		 * for the extra RBs, as per the normal disabled-RB logic.
		 */
		ctx->screen->info.max_render_backends = 8;
	}
	max_rbs = ctx->screen->info.max_render_backends;

	assert(rscreen->chip_class <= CAYMAN);

	/*
	 * Check whether the backend_map query is supported by the kernel.
	 * Note that the kernel DRM driver for a long time never filled in the
	 * associated data on eg/cm, only on r600/r700, hence ignore the valid
	 * bit there if the map is zero.
	 * (Albeit some chips with just one active rb can have a valid 0 map.)
	 */
	if (rscreen->info.r600_gb_backend_map_valid &&
	    (ctx->chip_class < EVERGREEN || rscreen->info.r600_gb_backend_map != 0)) {
		unsigned num_tile_pipes = rscreen->info.num_tile_pipes;
		unsigned backend_map = rscreen->info.r600_gb_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1 << i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			rscreen->info.enabled_rb_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, 0,
				   PIPE_USAGE_STAGING, max_rbs * 16);
	if (!buffer)
		return;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_MAP_WRITE);
	if (results) {
		memset(results, 0, max_rbs * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_MAP_READ);
		if (results) {
			for (i = 0; i < max_rbs; i++) {
				/* at least the highest bit will be set if the backend is used */
				if (results[i * 4 + 1])
					mask |= (1 << i);
			}
		}
	}

	r600_resource_reference(&buffer, NULL);

	if (mask) {
		if (rscreen->debug_flags & DBG_INFO &&
		    mask != rscreen->info.enabled_rb_mask) {
			printf("enabled_rb_mask (fixed) = 0x%x\n", mask);
		}
		rscreen->info.enabled_rb_mask = mask;
	}
}

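/* Table-building helpers for r600_driver_query_list: X() declares a query
 * without a group, XG() one in a named group, and XFULL() spells out every
 * field explicitly. */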
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)

static const struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
	X("decompress-calls", DECOMPRESS_CALLS, UINT64, AVERAGE),
	X("MRT-draw-calls", MRT_DRAW_CALLS, UINT64, AVERAGE),
	X("prim-restart-calls", PRIM_RESTART_CALLS, UINT64, AVERAGE),
	X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
	X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
	X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
	X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
	X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
	X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
	X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
	X("num-CB-cache-flushes", NUM_CB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-DB-cache-flushes", NUM_DB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-resident-handles", NUM_RESIDENT_HANDLES, UINT64, AVERAGE),
	X("tc-offloaded-slots", TC_OFFLOADED_SLOTS, UINT64, AVERAGE),
	X("tc-direct-slots", TC_DIRECT_SLOTS, UINT64, AVERAGE),
	X("tc-num-syncs", TC_NUM_SYNCS, UINT64, AVERAGE),
	X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
	X("gallium-thread-busy", GALLIUM_THREAD_BUSY, UINT64, AVERAGE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
	X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
	X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
	X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
	X("GFX-BO-list-size", GFX_BO_LIST_SIZE, UINT64, AVERAGE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
	X("VRAM-CPU-page-faults", NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use them as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
	X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
	X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
	X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
	X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
	X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
	X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
	X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
	X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
	X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
	X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
	X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
	X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
	X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),
	X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
	X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
	X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
	X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
	X("GPU-cp-dma-busy", GPU_CP_DMA_BUSY, UINT64, AVERAGE),
	X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),
};

#undef X
#undef XG
#undef XFULL

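/* The trailing entries of r600_driver_query_list (temperature, clocks and
 * the GPU-*-busy counters) depend on newer kernel interfaces, so hide them
 * when the DRM minor version is too old. */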
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(r600_driver_query_list);
	else
		return ARRAY_SIZE(r600_driver_query_list) - 25;
}

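/* Per the gallium contract, a NULL info pointer asks for the number of
 * available queries. An illustrative (hypothetical) enumeration loop on the
 * state-tracker side:
 *
 *    int n = screen->get_driver_query_info(screen, 0, NULL);
 *    for (int i = 0; i < n; i++) {
 *            struct pipe_driver_query_info qi;
 *            screen->get_driver_query_info(screen, i, &qi);
 *            // qi.name, qi.query_type, qi.max_value are now valid
 *    }
 */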
static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_MAPPED_VRAM:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_MAPPED_GTT:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	case R600_QUERY_VRAM_VIS_USAGE:
		info->max_value.u64 = rscreen->info.vram_vis_size;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->b.get_query_result_resource = r600_get_query_result_resource;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.max_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	list_inithead(&rctx->active_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}