GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/freedreno/computerator/a6xx.c
/*
 * Copyright © 2020 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ir3/ir3_compiler.h"

#include "util/u_math.h"

#include "adreno_pm4.xml.h"
#include "adreno_common.xml.h"
#include "a6xx.xml.h"

#include "ir3_asm.h"
#include "main.h"

struct a6xx_backend {
   struct backend base;

   struct ir3_compiler *compiler;
   struct fd_device *dev;

   unsigned seqno;
   struct fd_bo *control_mem;

   struct fd_bo *query_mem;
   const struct perfcntr *perfcntrs;
   unsigned num_perfcntrs;
};
define_cast(backend, a6xx_backend);

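/* Note: define_cast() (from main.h) generates the to_a6xx_backend() helper
 * used below to recover this backend's type from the generic
 * 'struct backend *'.
 */
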
/*
 * Data structures shared with GPU:
 */

/* This struct defines the layout of the fd6_context::control buffer: */
struct fd6_control {
   uint32_t seqno; /* seqno for async CP_EVENT_WRITE, etc */
   uint32_t _pad0;
   /* flag set from cmdstream when VSC overflow detected: */
   volatile uint32_t vsc_overflow;
   uint32_t _pad1;
   uint32_t vsc_scratch;
   uint32_t _pad2;
   uint32_t _pad3;
   uint32_t _pad4;

   /* scratch space for VPC_SO[i].FLUSH_BASE_LO/HI, start on 32 byte boundary. */
   struct {
      uint32_t offset;
      uint32_t pad[7];
   } flush_base[4];
};

#define control_ptr(a6xx_backend, member) \
   (a6xx_backend)->control_mem, offsetof(struct fd6_control, member), 0, 0

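/* Note: control_ptr() expands to the (bo, offset, orval, shift) argument
 * tuple that OUT_RELOC() expects, so e.g.
 * OUT_RELOC(ring, control_ptr(a6xx_backend, seqno)) emits the GPU address
 * of the seqno field within the control buffer.
 */
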
struct PACKED fd6_query_sample {
   uint64_t start;
   uint64_t result;
   uint64_t stop;
};

/* offset of a single field of an array of fd6_query_sample: */
#define query_sample_idx(a6xx_backend, idx, field) \
   (a6xx_backend)->query_mem, \
   (idx * sizeof(struct fd6_query_sample)) + \
   offsetof(struct fd6_query_sample, field), \
   0, 0

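/* Like control_ptr(), this macro expands to OUT_RELOC()'s (bo, offset,
 * orval, shift) arguments; query_sample_idx(b, i, stop) addresses the
 * 'stop' field of the i'th sample in query_mem.
 */
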
/*
 * Backend implementation:
 */

static struct kernel *
a6xx_assemble(struct backend *b, FILE *in)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);
   struct ir3_kernel *ir3_kernel = ir3_asm_assemble(a6xx_backend->compiler, in);
   ir3_kernel->backend = b;
   return &ir3_kernel->base;
}

static void
a6xx_disassemble(struct kernel *kernel, FILE *out)
{
   ir3_asm_disassemble(to_ir3_kernel(kernel), out);
}

static void
cs_program_emit(struct fd_ringbuffer *ring, struct kernel *kernel)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct ir3_shader_variant *v = ir3_kernel->v;
   const struct ir3_info *i = &v->info;
   enum a6xx_threadsize thrsz = i->double_threadsize ? THREAD128 : THREAD64;

   OUT_PKT4(ring, REG_A6XX_SP_MODE_CONTROL, 1);
   OUT_RING(ring, A6XX_SP_MODE_CONTROL_CONSTANT_DEMOTION_ENABLE | 4);

   OUT_PKT4(ring, REG_A6XX_SP_PERFCTR_ENABLE, 1);
   OUT_RING(ring, A6XX_SP_PERFCTR_ENABLE_CS);

   OUT_PKT4(ring, REG_A6XX_SP_FLOAT_CNTL, 1);
   OUT_RING(ring, 0);

   OUT_PKT4(ring, REG_A6XX_HLSQ_INVALIDATE_CMD, 1);
   OUT_RING(
      ring,
      A6XX_HLSQ_INVALIDATE_CMD_VS_STATE | A6XX_HLSQ_INVALIDATE_CMD_HS_STATE |
         A6XX_HLSQ_INVALIDATE_CMD_DS_STATE | A6XX_HLSQ_INVALIDATE_CMD_GS_STATE |
         A6XX_HLSQ_INVALIDATE_CMD_FS_STATE | A6XX_HLSQ_INVALIDATE_CMD_CS_STATE |
         A6XX_HLSQ_INVALIDATE_CMD_CS_IBO | A6XX_HLSQ_INVALIDATE_CMD_GFX_IBO);

   unsigned constlen = align(v->constlen, 4);
   OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL, 1);
   OUT_RING(ring,
            A6XX_HLSQ_CS_CNTL_CONSTLEN(constlen) | A6XX_HLSQ_CS_CNTL_ENABLED);

   OUT_PKT4(ring, REG_A6XX_SP_CS_CONFIG, 2);
   OUT_RING(ring, A6XX_SP_CS_CONFIG_ENABLED |
                     A6XX_SP_CS_CONFIG_NIBO(kernel->num_bufs) |
                     A6XX_SP_CS_CONFIG_NTEX(v->num_samp) |
                     A6XX_SP_CS_CONFIG_NSAMP(v->num_samp)); /* SP_CS_CONFIG */
   OUT_RING(ring, v->instrlen);                             /* SP_CS_INSTRLEN */

   OUT_PKT4(ring, REG_A6XX_SP_CS_CTRL_REG0, 1);
   OUT_RING(ring,
            A6XX_SP_CS_CTRL_REG0_THREADSIZE(thrsz) |
               A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(i->max_reg + 1) |
               A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(i->max_half_reg + 1) |
               COND(v->mergedregs, A6XX_SP_CS_CTRL_REG0_MERGEDREGS) |
               A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(ir3_shader_branchstack_hw(v)));

   OUT_PKT4(ring, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
   OUT_RING(ring, 0x41);

   uint32_t local_invocation_id, work_group_id;
   local_invocation_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
   work_group_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_WORKGROUP_ID);

   OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL_0, 2);
   OUT_RING(ring, A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
                     A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID(regid(63, 0)) |
                     A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID(regid(63, 0)) |
                     A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
   OUT_RING(ring, A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID(regid(63, 0)) |
                     A6XX_HLSQ_CS_CNTL_1_THREADSIZE(thrsz));

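   /* Note: regid(63, 0) (r63.x) is the "invalid/unused" register id, so the
    * workgroup-size and workgroup-offset sysvals above are left unwired;
    * ir3_find_sysval_regid() likewise returns it when the shader does not
    * consume the sysval.
    */
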
   OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START, 2);
   OUT_RELOC(ring, v->bo, 0, 0, 0); /* SP_CS_OBJ_START_LO/HI */

   OUT_PKT4(ring, REG_A6XX_SP_CS_INSTRLEN, 1);
   OUT_RING(ring, v->instrlen);

   OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START, 2);
   OUT_RELOC(ring, v->bo, 0, 0, 0);

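   /* CP_LOAD_STATE6 with SS6_INDIRECT asks the CP to preload the shader
    * program (v->instrlen units of instructions) from v->bo into the SP's
    * instruction state, rather than embedding the dwords in the cmdstream.
    */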
   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
                     CP_LOAD_STATE6_0_NUM_UNIT(v->instrlen));
   OUT_RELOC(ring, v->bo, 0, 0, 0);
}

static void
emit_const(struct fd_ringbuffer *ring, uint32_t regid, uint32_t sizedwords,
           const uint32_t *dwords)
{
   uint32_t align_sz;

   debug_assert((regid % 4) == 0);

   align_sz = align(sizedwords, 4);

   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3 + align_sz);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(regid / 4) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
                     CP_LOAD_STATE6_0_NUM_UNIT(DIV_ROUND_UP(sizedwords, 4)));
   OUT_RING(ring, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   OUT_RING(ring, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

   for (uint32_t i = 0; i < sizedwords; i++) {
      OUT_RING(ring, dwords[i]);
   }

   /* Zero-pad to multiple of 4 dwords */
   for (uint32_t i = sizedwords; i < align_sz; i++) {
      OUT_RING(ring, 0);
   }
}

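/* Worked example: for sizedwords == 6 at regid == 8, align_sz == 8, so the
 * packet carries DST_OFF(2) (vec4 units), NUM_UNIT(2) vec4s, the 6 payload
 * dwords, and 2 dwords of zero padding -- constants always upload in whole
 * vec4s.
 */
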
static void
cs_const_emit(struct fd_ringbuffer *ring, struct kernel *kernel,
              uint32_t grid[3])
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct ir3_shader_variant *v = ir3_kernel->v;

   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t base = const_state->offsets.immediate;
   int size = DIV_ROUND_UP(const_state->immediates_count, 4);

   if (ir3_kernel->info.numwg != INVALID_REG) {
      assert((ir3_kernel->info.numwg & 0x3) == 0);
      int idx = ir3_kernel->info.numwg >> 2;
      const_state->immediates[idx * 4 + 0] = grid[0];
      const_state->immediates[idx * 4 + 1] = grid[1];
      const_state->immediates[idx * 4 + 2] = grid[2];
   }

   for (int i = 0; i < MAX_BUFS; i++) {
      if (kernel->buf_addr_regs[i] != INVALID_REG) {
         assert((kernel->buf_addr_regs[i] & 0x3) == 0);
         int idx = kernel->buf_addr_regs[i] >> 2;

         uint64_t iova = fd_bo_get_iova(kernel->bufs[i]);

         const_state->immediates[idx * 4 + 1] = iova >> 32;
         const_state->immediates[idx * 4 + 0] = (iova << 32) >> 32;
      }
   }

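   /* Note: each 64-bit buffer iova is patched into the immediates as a
    * lo/hi dword pair; '(iova << 32) >> 32' just masks off the high half,
    * equivalent to 'iova & 0xffffffff'.
    */
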
   /* truncate size to avoid writing constants that shader
    * does not use:
    */
   size = MIN2(size + base, v->constlen) - base;

   /* convert out of vec4: */
   base *= 4;
   size *= 4;

   if (size > 0) {
      emit_const(ring, base, size, const_state->immediates);
   }
}

static void
cs_ibo_emit(struct fd_ringbuffer *ring, struct fd_submit *submit,
            struct kernel *kernel)
{
   struct fd_ringbuffer *state = fd_submit_new_ringbuffer(
      submit, kernel->num_bufs * 16 * 4, FD_RINGBUFFER_STREAMING);

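   /* Each IBO descriptor below is 16 dwords (6 meaningful + 10 zero), hence
    * the num_bufs * 16 * 4 byte state buffer allocated above.
    */
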
   for (unsigned i = 0; i < kernel->num_bufs; i++) {
      /* size is encoded with low 15b in WIDTH and high bits in HEIGHT,
       * in units of elements:
       */
      unsigned sz = kernel->buf_sizes[i];
      unsigned width = sz & MASK(15);
      unsigned height = sz >> 15;

      OUT_RING(state, A6XX_IBO_0_FMT(FMT6_32_UINT) | A6XX_IBO_0_TILE_MODE(0));
      OUT_RING(state, A6XX_IBO_1_WIDTH(width) | A6XX_IBO_1_HEIGHT(height));
      OUT_RING(state, A6XX_IBO_2_PITCH(0) | A6XX_IBO_2_UNK4 | A6XX_IBO_2_UNK31 |
                         A6XX_IBO_2_TYPE(A6XX_TEX_1D));
      OUT_RING(state, A6XX_IBO_3_ARRAY_PITCH(0));
      OUT_RELOC(state, kernel->bufs[i], 0, 0, 0);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
   }

   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_IBO) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
                     CP_LOAD_STATE6_0_NUM_UNIT(kernel->num_bufs));
   OUT_RB(ring, state);

   OUT_PKT4(ring, REG_A6XX_SP_CS_IBO, 2);
   OUT_RB(ring, state);

   OUT_PKT4(ring, REG_A6XX_SP_CS_IBO_COUNT, 1);
   OUT_RING(ring, kernel->num_bufs);

   fd_ringbuffer_del(state);
}

static inline unsigned
event_write(struct fd_ringbuffer *ring, struct kernel *kernel,
            enum vgt_event_type evt, bool timestamp)
{
   unsigned seqno = 0;

   OUT_PKT7(ring, CP_EVENT_WRITE, timestamp ? 4 : 1);
   OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(evt));
   if (timestamp) {
      struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
      struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
      seqno = ++a6xx_backend->seqno;
      OUT_RELOC(ring, control_ptr(a6xx_backend, seqno)); /* ADDR_LO/HI */
      OUT_RING(ring, seqno);
   }

   return seqno;
}

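/* When 'timestamp' is set, the GPU writes the new seqno into the control
 * buffer once the event fires; the returned seqno is what callers (like
 * cache_flush() below) poll against.
 */
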
static inline void
cache_flush(struct fd_ringbuffer *ring, struct kernel *kernel)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
   unsigned seqno;

   seqno = event_write(ring, kernel, RB_DONE_TS, true);

   OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
   OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                     CP_WAIT_REG_MEM_0_POLL_MEMORY);
   OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
   OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(seqno));
   OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(~0));
   OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

   seqno = event_write(ring, kernel, CACHE_FLUSH_TS, true);

   OUT_PKT7(ring, CP_WAIT_MEM_GTE, 4);
   OUT_RING(ring, CP_WAIT_MEM_GTE_0_RESERVED(0));
   OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
   OUT_RING(ring, CP_WAIT_MEM_GTE_3_REF(seqno));
}

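/* The flush is two-stage: first wait (WRITE_EQ) until the RB_DONE_TS seqno
 * lands, i.e. the work has drained, then issue CACHE_FLUSH_TS and wait
 * (GTE) on its seqno so cache contents are visible before results are read
 * back.
 */
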
static void
a6xx_emit_grid(struct kernel *kernel, uint32_t grid[3],
               struct fd_submit *submit)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
   struct fd_ringbuffer *ring = fd_submit_new_ringbuffer(
      submit, 0, FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);

   cs_program_emit(ring, kernel);
   cs_const_emit(ring, kernel, grid);
   cs_ibo_emit(ring, submit, kernel);

   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

   const unsigned *local_size = kernel->local_size;
   const unsigned *num_groups = grid;

   unsigned work_dim = 0;
   for (int i = 0; i < 3; i++) {
      if (!grid[i])
         break;
      work_dim++;
   }

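   /* work_dim counts the leading non-zero grid dimensions, so a grid of
    * {64, 0, 0} is a 1D dispatch, {8, 8, 0} 2D, and so on.
    */
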
   OUT_PKT4(ring, REG_A6XX_HLSQ_CS_NDRANGE_0, 7);
   OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(work_dim) |
                     A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(local_size[0] - 1) |
                     A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(local_size[1] - 1) |
                     A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(local_size[2] - 1));
   OUT_RING(ring,
            A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(local_size[0] * num_groups[0]));
   OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_2_GLOBALOFF_X */
   OUT_RING(ring,
            A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(local_size[1] * num_groups[1]));
   OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_4_GLOBALOFF_Y */
   OUT_RING(ring,
            A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(local_size[2] * num_groups[2]));
   OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_6_GLOBALOFF_Z */

   OUT_PKT4(ring, REG_A6XX_HLSQ_CS_KERNEL_GROUP_X, 3);
   OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_X */
   OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_Y */
   OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_Z */

   if (a6xx_backend->num_perfcntrs > 0) {
      a6xx_backend->query_mem = fd_bo_new(
         a6xx_backend->dev,
         a6xx_backend->num_perfcntrs * sizeof(struct fd6_query_sample), 0,
         "query");

      /* configure the performance counters to count the requested
       * countables:
       */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT4(ring, counter->select_reg, 1);
         OUT_RING(ring, counter->selector);
      }

      OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);

      /* and snapshot the start values: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT7(ring, CP_REG_TO_MEM, 3);
         OUT_RING(ring, CP_REG_TO_MEM_0_64B |
                           CP_REG_TO_MEM_0_REG(counter->counter_reg_lo));
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, start));
      }
   }

   OUT_PKT7(ring, CP_EXEC_CS, 4);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, CP_EXEC_CS_1_NGROUPS_X(grid[0]));
   OUT_RING(ring, CP_EXEC_CS_2_NGROUPS_Y(grid[1]));
   OUT_RING(ring, CP_EXEC_CS_3_NGROUPS_Z(grid[2]));

   OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);

   if (a6xx_backend->num_perfcntrs > 0) {
      /* snapshot the end values: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT7(ring, CP_REG_TO_MEM, 3);
         OUT_RING(ring, CP_REG_TO_MEM_0_64B |
                           CP_REG_TO_MEM_0_REG(counter->counter_reg_lo));
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, stop));
      }

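      /* CP_MEM_TO_MEM computes dst = srcA + srcB - srcC (NEG_C negates the
       * third source) on 64-bit operands (DOUBLE), so the loop below
       * accumulates result = result + (stop - start).
       */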
      /* and compute the result: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         /* result += stop - start: */
         OUT_PKT7(ring, CP_MEM_TO_MEM, 9);
         OUT_RING(ring, CP_MEM_TO_MEM_0_DOUBLE | CP_MEM_TO_MEM_0_NEG_C);
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, result)); /* dst */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, result)); /* srcA */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, stop));   /* srcB */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, start));  /* srcC */
      }
   }

   cache_flush(ring, kernel);
}

static void
a6xx_set_perfcntrs(struct backend *b, const struct perfcntr *perfcntrs,
                   unsigned num_perfcntrs)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);

   a6xx_backend->perfcntrs = perfcntrs;
   a6xx_backend->num_perfcntrs = num_perfcntrs;
}

static void
a6xx_read_perfcntrs(struct backend *b, uint64_t *results)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);

   fd_bo_cpu_prep(a6xx_backend->query_mem, NULL, FD_BO_PREP_READ);
   struct fd6_query_sample *samples = fd_bo_map(a6xx_backend->query_mem);

   for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
      results[i] = samples[i].result;
   }
}

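/* fd_bo_cpu_prep() with FD_BO_PREP_READ waits for the GPU to finish writing
 * query_mem, so the accumulated 'result' values are stable by the time the
 * buffer is mapped.
 */
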
struct backend *
a6xx_init(struct fd_device *dev, uint32_t gpu_id)
{
   struct a6xx_backend *a6xx_backend = calloc(1, sizeof(*a6xx_backend));

   a6xx_backend->base = (struct backend){
      .assemble = a6xx_assemble,
      .disassemble = a6xx_disassemble,
      .emit_grid = a6xx_emit_grid,
      .set_perfcntrs = a6xx_set_perfcntrs,
      .read_perfcntrs = a6xx_read_perfcntrs,
   };

   a6xx_backend->compiler = ir3_compiler_create(dev, gpu_id, false);
   a6xx_backend->dev = dev;

   a6xx_backend->control_mem = fd_bo_new(dev, 0x1000, 0, "control");

   return &a6xx_backend->base;
}
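
/* Usage sketch (hypothetical caller, mirroring how computerator's main.c
 * consumes the vtable): assemble a kernel from ir3 assembly, dispatch it,
 * then read back any configured performance counters.
 *
 *    struct backend *b = a6xx_init(dev, 630);
 *    struct kernel *kernel = b->assemble(b, asm_file);
 *    uint32_t grid[3] = { 32, 1, 1 };
 *    b->emit_grid(kernel, grid, submit);
 *    // ... flush the submit and wait for it to complete ...
 *    b->read_perfcntrs(b, results);
 */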