GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/panfrost/lib/decode.c

/*
 * Copyright (C) 2017-2019 Alyssa Rosenzweig
 * Copyright (C) 2017-2019 Connor Abbott
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <midgard_pack.h>
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <stdbool.h>
#include <stdarg.h>
#include <errno.h>
#include <ctype.h>
#include "decode.h"

#include "midgard/disassemble.h"
#include "bifrost/disassemble.h"

#define DUMP_UNPACKED(T, var, ...) { \
        pandecode_log(__VA_ARGS__); \
        pan_print(pandecode_dump_stream, T, var, (pandecode_indent + 1) * 2); \
}

#define DUMP_CL(T, cl, ...) { \
        pan_unpack(cl, T, temp); \
        DUMP_UNPACKED(T, temp, __VA_ARGS__); \
}

#define DUMP_SECTION(A, S, cl, ...) { \
        pan_section_unpack(cl, A, S, temp); \
        pandecode_log(__VA_ARGS__); \
        pan_section_print(pandecode_dump_stream, A, S, temp, (pandecode_indent + 1) * 2); \
}

#define MAP_ADDR(T, addr, cl) \
        const uint8_t *cl = 0; \
        { \
                struct pandecode_mapped_memory *mapped_mem = pandecode_find_mapped_gpu_mem_containing(addr); \
                cl = pandecode_fetch_gpu_mem(mapped_mem, addr, MALI_ ## T ## _LENGTH); \
        }

#define DUMP_ADDR(T, addr, ...) { \
        MAP_ADDR(T, addr, cl) \
        DUMP_CL(T, cl, __VA_ARGS__); \
}

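/* Illustrative usage of the helpers above, mirroring calls made later in this
 * file: DUMP_CL unpacks and prints a descriptor already mapped into CPU
 * memory, while DUMP_ADDR first resolves a GPU address, e.g.
 *
 *    DUMP_ADDR(VIEWPORT, p->viewport, "Viewport:\n");
 *
 * maps p->viewport, unpacks MALI_VIEWPORT_LENGTH bytes as a viewport
 * descriptor, and dumps it at the current indentation level. */
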
FILE *pandecode_dump_stream;

/* Semantic logging type.
 *
 * Raw: for raw messages to be printed as is.
 * Message: for helpful information to be commented out in replays.
 *
 * Use one of pandecode_log or pandecode_msg as syntax sugar.
 */

enum pandecode_log_type {
        PANDECODE_RAW,
        PANDECODE_MESSAGE,
};

#define pandecode_log(...) pandecode_log_typed(PANDECODE_RAW, __VA_ARGS__)
#define pandecode_msg(...) pandecode_log_typed(PANDECODE_MESSAGE, __VA_ARGS__)

unsigned pandecode_indent = 0;

static void
pandecode_make_indent(void)
{
        for (unsigned i = 0; i < pandecode_indent; ++i)
                fprintf(pandecode_dump_stream, "  ");
}

static void PRINTFLIKE(2, 3)
pandecode_log_typed(enum pandecode_log_type type, const char *format, ...)
{
        va_list ap;

        pandecode_make_indent();

        if (type == PANDECODE_MESSAGE)
                fprintf(pandecode_dump_stream, "// ");

        va_start(ap, format);
        vfprintf(pandecode_dump_stream, format, ap);
        va_end(ap);
}

static void
pandecode_log_cont(const char *format, ...)
{
        va_list ap;

        va_start(ap, format);
        vfprintf(pandecode_dump_stream, format, ap);
        va_end(ap);
}

/* To check for memory safety issues, validates that the given pointer in GPU
 * memory is valid, containing at least sz bytes. The goal is to eliminate
 * GPU-side memory bugs (NULL pointer dereferences, buffer overflows, or buffer
 * overruns) by statically validating pointers.
 */

static void
pandecode_validate_buffer(mali_ptr addr, size_t sz)
{
        if (!addr) {
                pandecode_msg("XXX: null pointer deref\n");
                return;
        }

        /* Find a BO */

        struct pandecode_mapped_memory *bo =
                pandecode_find_mapped_gpu_mem_containing(addr);

        if (!bo) {
                pandecode_msg("XXX: invalid memory dereference\n");
                return;
        }

        /* Bounds check */

        unsigned offset = addr - bo->gpu_va;
        unsigned total = offset + sz;

        if (total > bo->length) {
                pandecode_msg("XXX: buffer overrun. "
                              "Chunk of size %zu at offset %d in buffer of size %zu. "
                              "Overrun by %zu bytes.\n",
                              sz, offset, bo->length, total - bo->length);
                return;
        }
}

/* Midgard's tiler descriptor is embedded within the larger FBD */

static void
pandecode_midgard_tiler_descriptor(
                const struct mali_midgard_tiler_packed *tp,
                const struct mali_midgard_tiler_weights_packed *wp)
{
        pan_unpack(tp, MIDGARD_TILER, t);
        DUMP_UNPACKED(MIDGARD_TILER, t, "Tiler:\n");

        /* We've never seen weights used in practice, but they exist */
        pan_unpack(wp, MIDGARD_TILER_WEIGHTS, w);
        bool nonzero_weights = false;

        nonzero_weights |= w.weight0 != 0x0;
        nonzero_weights |= w.weight1 != 0x0;
        nonzero_weights |= w.weight2 != 0x0;
        nonzero_weights |= w.weight3 != 0x0;
        nonzero_weights |= w.weight4 != 0x0;
        nonzero_weights |= w.weight5 != 0x0;
        nonzero_weights |= w.weight6 != 0x0;
        nonzero_weights |= w.weight7 != 0x0;

        if (nonzero_weights)
                DUMP_UNPACKED(MIDGARD_TILER_WEIGHTS, w, "Tiler Weights:\n");
}

/* Information about the framebuffer passed back for additional analysis */

struct pandecode_fbd {
        unsigned width;
        unsigned height;
        unsigned rt_count;
        bool has_extra;
};

static struct pandecode_fbd
pandecode_sfbd(uint64_t gpu_va, int job_no, bool is_fragment, unsigned gpu_id)
{
        struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
        const void *PANDECODE_PTR_VAR(s, mem, (mali_ptr) gpu_va);

        struct pandecode_fbd info = {
                .has_extra = false,
                .rt_count = 1
        };

        pandecode_log("Single-Target Framebuffer:\n");
        pandecode_indent++;

        DUMP_SECTION(SINGLE_TARGET_FRAMEBUFFER, LOCAL_STORAGE, s, "Local Storage:\n");
        pan_section_unpack(s, SINGLE_TARGET_FRAMEBUFFER, PARAMETERS, p);
        DUMP_UNPACKED(SINGLE_TARGET_FRAMEBUFFER_PARAMETERS, p, "Parameters:\n");

        const void *t = pan_section_ptr(s, SINGLE_TARGET_FRAMEBUFFER, TILER);
        const void *w = pan_section_ptr(s, SINGLE_TARGET_FRAMEBUFFER, TILER_WEIGHTS);

        pandecode_midgard_tiler_descriptor(t, w);

        pandecode_indent--;

        /* Dummy unpack of the padding section to make sure all words are 0.
         * No need to call print here since the section is supposed to be empty.
         */
        pan_section_unpack(s, SINGLE_TARGET_FRAMEBUFFER, PADDING_1, padding1);
        pan_section_unpack(s, SINGLE_TARGET_FRAMEBUFFER, PADDING_2, padding2);
        pandecode_log("\n");

        return info;
}

static void
pandecode_local_storage(uint64_t gpu_va, int job_no)
{
        struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
        const struct mali_local_storage_packed *PANDECODE_PTR_VAR(s, mem, (mali_ptr) gpu_va);
        DUMP_CL(LOCAL_STORAGE, s, "Local Storage:\n");
}

static void
pandecode_render_target(uint64_t gpu_va, unsigned job_no, bool is_bifrost, unsigned gpu_id,
                        const struct MALI_MULTI_TARGET_FRAMEBUFFER_PARAMETERS *fb)
{
        pandecode_log("Color Render Targets:\n");
        pandecode_indent++;

        for (int i = 0; i < (fb->render_target_count); i++) {
                mali_ptr rt_va = gpu_va + i * MALI_RENDER_TARGET_LENGTH;
                struct pandecode_mapped_memory *mem =
                        pandecode_find_mapped_gpu_mem_containing(rt_va);
                const struct mali_render_target_packed *PANDECODE_PTR_VAR(rtp, mem, (mali_ptr) rt_va);
                DUMP_CL(RENDER_TARGET, rtp, "Color Render Target %d:\n", i);
        }

        pandecode_indent--;
        pandecode_log("\n");
}

static void
pandecode_sample_locations(const void *fb, int job_no)
{
        pan_section_unpack(fb, MULTI_TARGET_FRAMEBUFFER, BIFROST_PARAMETERS, params);

        struct pandecode_mapped_memory *smem =
                pandecode_find_mapped_gpu_mem_containing(params.sample_locations);

        const u16 *PANDECODE_PTR_VAR(samples, smem, params.sample_locations);

        pandecode_log("Sample locations:\n");
        for (int i = 0; i < 33; i++) {
                pandecode_log(" (%d, %d),\n",
                              samples[2 * i] - 128,
                              samples[2 * i + 1] - 128);
        }
}

static void
pandecode_dcd(const struct MALI_DRAW *p,
              int job_no, enum mali_job_type job_type,
              char *suffix, bool is_bifrost, unsigned gpu_id);

static struct pandecode_fbd
pandecode_mfbd_bfr(uint64_t gpu_va, int job_no, bool is_fragment, bool is_bifrost, unsigned gpu_id)
{
        struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
        const void *PANDECODE_PTR_VAR(fb, mem, (mali_ptr) gpu_va);
        pan_section_unpack(fb, MULTI_TARGET_FRAMEBUFFER, PARAMETERS, params);

        struct pandecode_fbd info;

        if (is_bifrost) {
                pandecode_sample_locations(fb, job_no);

                pan_section_unpack(fb, MULTI_TARGET_FRAMEBUFFER, BIFROST_PARAMETERS, bparams);
                unsigned dcd_size = MALI_DRAW_LENGTH + MALI_DRAW_PADDING_LENGTH;
                struct pandecode_mapped_memory *dcdmem =
                        pandecode_find_mapped_gpu_mem_containing(bparams.frame_shader_dcds);

                if (bparams.pre_frame_0 != MALI_PRE_POST_FRAME_SHADER_MODE_NEVER) {
                        const void *PANDECODE_PTR_VAR(dcd, dcdmem, bparams.frame_shader_dcds + (0 * dcd_size));
                        pan_unpack(dcd, DRAW, draw);
                        pandecode_log("Pre frame 0:\n");
                        pandecode_dcd(&draw, job_no, MALI_JOB_TYPE_FRAGMENT, "", true, gpu_id);
                }

                if (bparams.pre_frame_1 != MALI_PRE_POST_FRAME_SHADER_MODE_NEVER) {
                        const void *PANDECODE_PTR_VAR(dcd, dcdmem, bparams.frame_shader_dcds + (1 * dcd_size));
                        pan_unpack(dcd, DRAW, draw);
                        pandecode_log("Pre frame 1:\n");
                        pandecode_dcd(&draw, job_no, MALI_JOB_TYPE_FRAGMENT, "", true, gpu_id);
                }

                if (bparams.post_frame != MALI_PRE_POST_FRAME_SHADER_MODE_NEVER) {
                        const void *PANDECODE_PTR_VAR(dcd, dcdmem, bparams.frame_shader_dcds + (2 * dcd_size));
                        pan_unpack(dcd, DRAW, draw);
                        pandecode_log("Post frame:\n");
                        pandecode_dcd(&draw, job_no, MALI_JOB_TYPE_FRAGMENT, "", true, gpu_id);
                }
        }

        pandecode_log("Multi-Target Framebuffer:\n");
        pandecode_indent++;

        if (is_bifrost) {
                DUMP_SECTION(MULTI_TARGET_FRAMEBUFFER, BIFROST_PARAMETERS, fb, "Bifrost Params:\n");
        } else {
                DUMP_SECTION(MULTI_TARGET_FRAMEBUFFER, LOCAL_STORAGE, fb, "Local Storage:\n");
        }

        info.width = params.width;
        info.height = params.height;
        info.rt_count = params.render_target_count;
        DUMP_UNPACKED(MULTI_TARGET_FRAMEBUFFER_PARAMETERS, params, "Parameters:\n");

        if (is_bifrost) {
                DUMP_SECTION(MULTI_TARGET_FRAMEBUFFER, BIFROST_TILER_POINTER, fb, "Tiler Pointer:\n");
                pan_section_unpack(fb, MULTI_TARGET_FRAMEBUFFER, BIFROST_PADDING, padding);
        } else {
                const void *t = pan_section_ptr(fb, MULTI_TARGET_FRAMEBUFFER, TILER);
                const void *w = pan_section_ptr(fb, MULTI_TARGET_FRAMEBUFFER, TILER_WEIGHTS);
                pandecode_midgard_tiler_descriptor(t, w);
        }

        pandecode_indent--;
        pandecode_log("\n");

        gpu_va += MALI_MULTI_TARGET_FRAMEBUFFER_LENGTH;

        info.has_extra = params.has_zs_crc_extension;

        if (info.has_extra) {
                struct pandecode_mapped_memory *mem =
                        pandecode_find_mapped_gpu_mem_containing(gpu_va);
                const struct mali_zs_crc_extension_packed *PANDECODE_PTR_VAR(zs_crc, mem, (mali_ptr)gpu_va);
                DUMP_CL(ZS_CRC_EXTENSION, zs_crc, "ZS CRC Extension:\n");
                pandecode_log("\n");

                gpu_va += MALI_ZS_CRC_EXTENSION_LENGTH;
        }

        if (is_fragment)
                pandecode_render_target(gpu_va, job_no, is_bifrost, gpu_id, &params);

        return info;
}

static void
pandecode_attributes(const struct pandecode_mapped_memory *mem,
                     mali_ptr addr, int job_no, char *suffix,
                     int count, bool varying, enum mali_job_type job_type)
{
        char *prefix = varying ? "Varying" : "Attribute";
        assert(addr);

        if (!count) {
                pandecode_msg("warn: No %s records\n", prefix);
                return;
        }

        MAP_ADDR(ATTRIBUTE_BUFFER, addr, cl);

        for (int i = 0; i < count; ++i) {
                pan_unpack(cl + i * MALI_ATTRIBUTE_BUFFER_LENGTH, ATTRIBUTE_BUFFER, temp);
                DUMP_UNPACKED(ATTRIBUTE_BUFFER, temp, "%s:\n", prefix);

                switch (temp.type) {
                case MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR_WRITE_REDUCTION:
                case MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR: {
                        pan_unpack(cl + (i + 1) * MALI_ATTRIBUTE_BUFFER_LENGTH,
                                   ATTRIBUTE_BUFFER_CONTINUATION_NPOT, temp2);
                        pan_print(pandecode_dump_stream, ATTRIBUTE_BUFFER_CONTINUATION_NPOT,
                                  temp2, (pandecode_indent + 1) * 2);
                        i++;
                        break;
                }
                case MALI_ATTRIBUTE_TYPE_3D_LINEAR:
                case MALI_ATTRIBUTE_TYPE_3D_INTERLEAVED: {
                        pan_unpack(cl + (i + 1) * MALI_ATTRIBUTE_BUFFER_CONTINUATION_3D_LENGTH,
                                   ATTRIBUTE_BUFFER_CONTINUATION_3D, temp2);
                        pan_print(pandecode_dump_stream, ATTRIBUTE_BUFFER_CONTINUATION_3D,
                                  temp2, (pandecode_indent + 1) * 2);
                        i++;
                        break;
                }
                default:
                        break;
                }
        }
        pandecode_log("\n");
}

/* Decodes a Bifrost blend constant. See the notes in bifrost_blend_rt */

static mali_ptr
pandecode_bifrost_blend(void *descs, int job_no, int rt_no, mali_ptr frag_shader)
{
        pan_unpack(descs + (rt_no * MALI_BLEND_LENGTH), BLEND, b);
        DUMP_UNPACKED(BLEND, b, "Blend RT %d:\n", rt_no);
        if (b.bifrost.internal.mode != MALI_BIFROST_BLEND_MODE_SHADER)
                return 0;

        return (frag_shader & 0xFFFFFFFF00000000ULL) | b.bifrost.internal.shader.pc;
}

static mali_ptr
pandecode_midgard_blend_mrt(void *descs, int job_no, int rt_no)
{
        pan_unpack(descs + (rt_no * MALI_BLEND_LENGTH), BLEND, b);
        DUMP_UNPACKED(BLEND, b, "Blend RT %d:\n", rt_no);
        return b.midgard.blend_shader ? (b.midgard.shader_pc & ~0xf) : 0;
}

static unsigned
pandecode_attribute_meta(int count, mali_ptr attribute, bool varying)
{
        unsigned max = 0;

        for (int i = 0; i < count; ++i, attribute += MALI_ATTRIBUTE_LENGTH) {
                MAP_ADDR(ATTRIBUTE, attribute, cl);
                pan_unpack(cl, ATTRIBUTE, a);
                DUMP_UNPACKED(ATTRIBUTE, a, "%s:\n", varying ? "Varying" : "Attribute");
                max = MAX2(max, a.buffer_index);
        }

        pandecode_log("\n");
        return MIN2(max + 1, 256);
}

/* return bits [lo, hi) of word */

static u32
bits(u32 word, u32 lo, u32 hi)
{
        if (hi - lo >= 32)
                return word; /* avoid undefined behavior with the shift */

        return (word >> lo) & ((1 << (hi - lo)) - 1);
}

static void
pandecode_invocation(const void *i)
{
        /* Decode invocation_count. See the comment before the definition of
         * invocation_count for an explanation.
         */
        pan_unpack(i, INVOCATION, invocation);

        unsigned size_x = bits(invocation.invocations, 0, invocation.size_y_shift) + 1;
        unsigned size_y = bits(invocation.invocations, invocation.size_y_shift, invocation.size_z_shift) + 1;
        unsigned size_z = bits(invocation.invocations, invocation.size_z_shift, invocation.workgroups_x_shift) + 1;

        unsigned groups_x = bits(invocation.invocations, invocation.workgroups_x_shift, invocation.workgroups_y_shift) + 1;
        unsigned groups_y = bits(invocation.invocations, invocation.workgroups_y_shift, invocation.workgroups_z_shift) + 1;
        unsigned groups_z = bits(invocation.invocations, invocation.workgroups_z_shift, 32) + 1;

        pandecode_log("Invocation (%d, %d, %d) x (%d, %d, %d)\n",
                      size_x, size_y, size_z,
                      groups_x, groups_y, groups_z);

        DUMP_UNPACKED(INVOCATION, invocation, "Invocation:\n")
}

static void
pandecode_primitive(const void *p)
{
        pan_unpack(p, PRIMITIVE, primitive);
        DUMP_UNPACKED(PRIMITIVE, primitive, "Primitive:\n");

        /* Validate an index buffer is present if we need one. TODO: verify
         * relationship between invocation_count and index_count */

        if (primitive.indices) {
                /* Grab the size */
                unsigned size = (primitive.index_type == MALI_INDEX_TYPE_UINT32) ?
                                sizeof(uint32_t) : primitive.index_type;

                /* Ensure we got a size, and if so, validate the index buffer
                 * is large enough to hold a full set of indices of the given
                 * size */

                if (!size)
                        pandecode_msg("XXX: index size missing\n");
                else
                        pandecode_validate_buffer(primitive.indices, primitive.index_count * size);
        } else if (primitive.index_type)
                pandecode_msg("XXX: unexpected index size\n");
}

static void
pandecode_uniform_buffers(mali_ptr pubufs, int ubufs_count, int job_no)
{
        struct pandecode_mapped_memory *umem = pandecode_find_mapped_gpu_mem_containing(pubufs);
        uint64_t *PANDECODE_PTR_VAR(ubufs, umem, pubufs);

        for (int i = 0; i < ubufs_count; i++) {
                mali_ptr addr = (ubufs[i] >> 10) << 2;
                unsigned size = addr ? (((ubufs[i] & ((1 << 10) - 1)) + 1) * 16) : 0;

                pandecode_validate_buffer(addr, size);

                char *ptr = pointer_as_memory_reference(addr);
                pandecode_log("ubuf_%d[%u] = %s;\n", i, size, ptr);
                free(ptr);
        }

        pandecode_log("\n");
}

static void
pandecode_uniforms(mali_ptr uniforms, unsigned uniform_count)
{
        pandecode_validate_buffer(uniforms, uniform_count * 16);

        char *ptr = pointer_as_memory_reference(uniforms);
        pandecode_log("vec4 uniforms[%u] = %s;\n", uniform_count, ptr);
        free(ptr);
        pandecode_log("\n");
}

static const char *
shader_type_for_job(unsigned type)
{
        switch (type) {
        case MALI_JOB_TYPE_VERTEX:   return "VERTEX";
        case MALI_JOB_TYPE_TILER:    return "FRAGMENT";
        case MALI_JOB_TYPE_FRAGMENT: return "FRAGMENT";
        case MALI_JOB_TYPE_COMPUTE:  return "COMPUTE";
        default:                     return "UNKNOWN";
        }
}

static unsigned shader_id = 0;

static struct midgard_disasm_stats
pandecode_shader_disassemble(mali_ptr shader_ptr, int shader_no, int type,
                             bool is_bifrost, unsigned gpu_id)
{
        struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(shader_ptr);
        uint8_t *PANDECODE_PTR_VAR(code, mem, shader_ptr);

        /* Compute maximum possible size */
        size_t sz = mem->length - (shader_ptr - mem->gpu_va);

        /* Print some boilerplate to clearly denote the assembly (which doesn't
         * obey indentation rules), and actually do the disassembly! */

        pandecode_log_cont("\n\n");

        struct midgard_disasm_stats stats;

        if (is_bifrost) {
                disassemble_bifrost(pandecode_dump_stream, code, sz, true);

                /* TODO: Extend stats to Bifrost */
                stats.texture_count = -128;
                stats.sampler_count = -128;
                stats.attribute_count = -128;
                stats.varying_count = -128;
                stats.uniform_count = -128;
                stats.uniform_buffer_count = -128;
                stats.work_count = -128;

                stats.instruction_count = 0;
                stats.bundle_count = 0;
                stats.quadword_count = 0;
                stats.helper_invocations = false;
        } else {
                stats = disassemble_midgard(pandecode_dump_stream,
                                            code, sz, gpu_id, true);
        }

        unsigned nr_threads =
                (stats.work_count <= 4) ? 4 :
                (stats.work_count <= 8) ? 2 :
                1;

        pandecode_log_cont("shader%d - MESA_SHADER_%s shader: "
                           "%u inst, %u bundles, %u quadwords, "
                           "%u registers, %u threads, 0 loops, 0:0 spills:fills\n\n\n",
                           shader_id++,
                           shader_type_for_job(type),
                           stats.instruction_count, stats.bundle_count, stats.quadword_count,
                           stats.work_count, nr_threads);

        return stats;
}

static void
pandecode_texture_payload(mali_ptr payload,
                          enum mali_texture_dimension dim,
                          enum mali_texture_layout layout,
                          bool manual_stride,
                          uint8_t levels,
                          uint16_t nr_samples,
                          uint16_t array_size,
                          struct pandecode_mapped_memory *tmem)
{
        pandecode_log(".payload = {\n");
        pandecode_indent++;

        /* A bunch of bitmap pointers follow. We work out the correct number,
         * based on the mipmap/cubemap properties, but dump extra possibilities
         * to futureproof */

        int bitmap_count = levels;

        /* Miptree for each face */
        if (dim == MALI_TEXTURE_DIMENSION_CUBE)
                bitmap_count *= 6;

        /* Array of layers */
        bitmap_count *= nr_samples;

        /* Array of textures */
        bitmap_count *= array_size;

        /* Stride for each element */
        if (manual_stride)
                bitmap_count *= 2;

        mali_ptr *pointers_and_strides = pandecode_fetch_gpu_mem(tmem,
                        payload, sizeof(mali_ptr) * bitmap_count);
        for (int i = 0; i < bitmap_count; ++i) {
                /* How we dump depends if this is a stride or a pointer */

                if (manual_stride && (i & 1)) {
                        /* signed 32-bit snuck in as a 64-bit pointer */
                        uint64_t stride_set = pointers_and_strides[i];
                        int32_t line_stride = stride_set;
                        int32_t surface_stride = stride_set >> 32;
                        pandecode_log("(mali_ptr) %d /* surface stride */ %d /* line stride */, \n",
                                      surface_stride, line_stride);
                } else {
                        char *a = pointer_as_memory_reference(pointers_and_strides[i]);
                        pandecode_log("%s, \n", a);
                        free(a);
                }
        }

        pandecode_indent--;
        pandecode_log("},\n");
}

static void
pandecode_texture(mali_ptr u,
                  struct pandecode_mapped_memory *tmem,
                  unsigned job_no, unsigned tex)
{
        struct pandecode_mapped_memory *mapped_mem = pandecode_find_mapped_gpu_mem_containing(u);
        const uint8_t *cl = pandecode_fetch_gpu_mem(mapped_mem, u, MALI_MIDGARD_TEXTURE_LENGTH);

        pan_unpack(cl, MIDGARD_TEXTURE, temp);
        DUMP_UNPACKED(MIDGARD_TEXTURE, temp, "Texture:\n")

        pandecode_indent++;
        unsigned nr_samples = temp.dimension == MALI_TEXTURE_DIMENSION_3D ?
                              1 : temp.sample_count;
        pandecode_texture_payload(u + MALI_MIDGARD_TEXTURE_LENGTH,
                        temp.dimension, temp.texel_ordering, temp.manual_stride,
                        temp.levels, nr_samples, temp.array_size, mapped_mem);
        pandecode_indent--;
}

static void
pandecode_bifrost_texture(
                const void *cl,
                unsigned job_no,
                unsigned tex)
{
        pan_unpack(cl, BIFROST_TEXTURE, temp);
        DUMP_UNPACKED(BIFROST_TEXTURE, temp, "Texture:\n")

        struct pandecode_mapped_memory *tmem = pandecode_find_mapped_gpu_mem_containing(temp.surfaces);
        unsigned nr_samples = temp.dimension == MALI_TEXTURE_DIMENSION_3D ?
                              1 : temp.sample_count;
        pandecode_indent++;
        pandecode_texture_payload(temp.surfaces, temp.dimension, temp.texel_ordering,
                                  true, temp.levels, nr_samples, temp.array_size, tmem);
        pandecode_indent--;
}

static void
pandecode_blend_shader_disassemble(mali_ptr shader, int job_no, int job_type,
                                   bool is_bifrost, unsigned gpu_id)
{
        struct midgard_disasm_stats stats =
                pandecode_shader_disassemble(shader, job_no, job_type, is_bifrost, gpu_id);

        bool has_texture = (stats.texture_count > 0);
        bool has_sampler = (stats.sampler_count > 0);
        bool has_attribute = (stats.attribute_count > 0);
        bool has_varying = (stats.varying_count > 0);
        bool has_uniform = (stats.uniform_count > 0);
        bool has_ubo = (stats.uniform_buffer_count > 0);

        if (has_texture || has_sampler)
                pandecode_msg("XXX: blend shader accessing textures\n");

        if (has_attribute || has_varying)
                pandecode_msg("XXX: blend shader accessing interstage\n");

        if (has_uniform || has_ubo)
                pandecode_msg("XXX: blend shader accessing uniforms\n");
}

static void
pandecode_textures(mali_ptr textures, unsigned texture_count, int job_no, bool is_bifrost)
{
        struct pandecode_mapped_memory *mmem = pandecode_find_mapped_gpu_mem_containing(textures);

        if (!mmem)
                return;

        pandecode_log("Textures %"PRIx64"_%d:\n", textures, job_no);
        pandecode_indent++;

        if (is_bifrost) {
                const void *cl = pandecode_fetch_gpu_mem(mmem,
                                textures, MALI_BIFROST_TEXTURE_LENGTH *
                                texture_count);

                for (unsigned tex = 0; tex < texture_count; ++tex) {
                        pandecode_bifrost_texture(cl +
                                        MALI_BIFROST_TEXTURE_LENGTH * tex,
                                        job_no, tex);
                }
        } else {
                mali_ptr *PANDECODE_PTR_VAR(u, mmem, textures);

                for (unsigned tex = 0; tex < texture_count; ++tex) {
                        mali_ptr *PANDECODE_PTR_VAR(u, mmem, textures + tex * sizeof(mali_ptr));
                        char *a = pointer_as_memory_reference(*u);
                        pandecode_log("%s,\n", a);
                        free(a);
                }

                /* Now, finally, descend down into the texture descriptor */
                for (unsigned tex = 0; tex < texture_count; ++tex) {
                        mali_ptr *PANDECODE_PTR_VAR(u, mmem, textures + tex * sizeof(mali_ptr));
                        struct pandecode_mapped_memory *tmem = pandecode_find_mapped_gpu_mem_containing(*u);
                        if (tmem)
                                pandecode_texture(*u, tmem, job_no, tex);
                }
        }
        pandecode_indent--;
        pandecode_log("\n");
}

static void
pandecode_samplers(mali_ptr samplers, unsigned sampler_count, int job_no, bool is_bifrost)
{
        pandecode_log("Samplers %"PRIx64"_%d:\n", samplers, job_no);
        pandecode_indent++;

        for (int i = 0; i < sampler_count; ++i) {
                if (is_bifrost) {
                        DUMP_ADDR(BIFROST_SAMPLER, samplers + (MALI_BIFROST_SAMPLER_LENGTH * i), "Sampler %d:\n", i);
                } else {
                        DUMP_ADDR(MIDGARD_SAMPLER, samplers + (MALI_MIDGARD_SAMPLER_LENGTH * i), "Sampler %d:\n", i);
                }
        }

        pandecode_indent--;
        pandecode_log("\n");
}

static void
pandecode_dcd(const struct MALI_DRAW *p,
              int job_no, enum mali_job_type job_type,
              char *suffix, bool is_bifrost, unsigned gpu_id)
{
        struct pandecode_mapped_memory *attr_mem;

        bool idvs = (job_type == MALI_JOB_TYPE_INDEXED_VERTEX);

        struct pandecode_fbd fbd_info = {
                /* Default for Bifrost */
                .rt_count = 1
        };

        if ((job_type != MALI_JOB_TYPE_TILER) || is_bifrost)
                pandecode_local_storage(p->fbd & ~1, job_no);
        else if (p->fbd & MALI_FBD_TAG_IS_MFBD)
                fbd_info = pandecode_mfbd_bfr((u64) ((uintptr_t) p->fbd) & ~MALI_FBD_TAG_MASK,
                                              job_no, false, false, gpu_id);
        else
                fbd_info = pandecode_sfbd((u64) (uintptr_t) p->fbd, job_no, false, gpu_id);

        int varying_count = 0, attribute_count = 0, uniform_count = 0, uniform_buffer_count = 0;
        int texture_count = 0, sampler_count = 0;

        if (p->state) {
                struct pandecode_mapped_memory *smem = pandecode_find_mapped_gpu_mem_containing(p->state);
                uint32_t *cl = pandecode_fetch_gpu_mem(smem, p->state, MALI_RENDERER_STATE_LENGTH);

                pan_unpack(cl, RENDERER_STATE, state);

                if (state.shader.shader & ~0xF)
                        pandecode_shader_disassemble(state.shader.shader & ~0xF, job_no, job_type, is_bifrost, gpu_id);

                if (idvs && state.secondary_shader)
                        pandecode_shader_disassemble(state.secondary_shader, job_no, job_type, is_bifrost, gpu_id);

                DUMP_UNPACKED(RENDERER_STATE, state, "State:\n");
                pandecode_indent++;

                /* Save for dumps */
                attribute_count = state.shader.attribute_count;
                varying_count = state.shader.varying_count;
                texture_count = state.shader.texture_count;
                sampler_count = state.shader.sampler_count;
                uniform_buffer_count = state.properties.uniform_buffer_count;

                if (is_bifrost)
                        uniform_count = state.preload.uniform_count;
                else
                        uniform_count = state.properties.midgard.uniform_count;

                if (is_bifrost)
                        DUMP_UNPACKED(PRELOAD, state.preload, "Preload:\n");

                if (!is_bifrost) {
                        mali_ptr shader = state.sfbd_blend_shader & ~0xF;
                        if (state.multisample_misc.sfbd_blend_shader && shader)
                                pandecode_blend_shader_disassemble(shader, job_no, job_type, false, gpu_id);
                }
                pandecode_indent--;
                pandecode_log("\n");

                /* MRT blend fields are used whenever MFBD is used, with
                 * per-RT descriptors */

                if ((job_type == MALI_JOB_TYPE_TILER || job_type == MALI_JOB_TYPE_FRAGMENT) &&
                    (is_bifrost || p->fbd & MALI_FBD_TAG_IS_MFBD)) {
                        void *blend_base = ((void *) cl) + MALI_RENDERER_STATE_LENGTH;

                        for (unsigned i = 0; i < fbd_info.rt_count; i++) {
                                mali_ptr shader = 0;

                                if (is_bifrost)
                                        shader = pandecode_bifrost_blend(blend_base, job_no, i,
                                                                         state.shader.shader);
                                else
                                        shader = pandecode_midgard_blend_mrt(blend_base, job_no, i);

                                if (shader & ~0xF)
                                        pandecode_blend_shader_disassemble(shader, job_no, job_type,
                                                                           is_bifrost, gpu_id);
                        }
                }
        } else
                pandecode_msg("XXX: missing shader descriptor\n");

        if (p->viewport) {
                DUMP_ADDR(VIEWPORT, p->viewport, "Viewport:\n");
                pandecode_log("\n");
        }

        unsigned max_attr_index = 0;

        if (p->attributes)
                max_attr_index = pandecode_attribute_meta(attribute_count, p->attributes, false);

        if (p->attribute_buffers) {
                attr_mem = pandecode_find_mapped_gpu_mem_containing(p->attribute_buffers);
                pandecode_attributes(attr_mem, p->attribute_buffers, job_no, suffix, max_attr_index, false, job_type);
        }

        if (p->varyings) {
                varying_count = pandecode_attribute_meta(varying_count, p->varyings, true);
        }

        if (p->varying_buffers) {
                attr_mem = pandecode_find_mapped_gpu_mem_containing(p->varying_buffers);
                pandecode_attributes(attr_mem, p->varying_buffers, job_no, suffix, varying_count, true, job_type);
        }

        if (p->uniform_buffers) {
                if (uniform_buffer_count)
                        pandecode_uniform_buffers(p->uniform_buffers, uniform_buffer_count, job_no);
                else
                        pandecode_msg("warn: UBOs specified but not referenced\n");
        } else if (uniform_buffer_count)
                pandecode_msg("XXX: UBOs referenced but not specified\n");

        /* We don't want to actually dump uniforms, but we do need to validate
         * that the counts we were given are sane */

        if (p->push_uniforms) {
                if (uniform_count)
                        pandecode_uniforms(p->push_uniforms, uniform_count);
                else
                        pandecode_msg("warn: Uniforms specified but not referenced\n");
        } else if (uniform_count)
                pandecode_msg("XXX: Uniforms referenced but not specified\n");

        if (p->textures)
                pandecode_textures(p->textures, texture_count, job_no, is_bifrost);

        if (p->samplers)
                pandecode_samplers(p->samplers, sampler_count, job_no, is_bifrost);
}

static void
pandecode_bifrost_tiler_heap(mali_ptr gpu_va, int job_no)
{
        struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
        pan_unpack(PANDECODE_PTR(mem, gpu_va, void), BIFROST_TILER_HEAP, h);
        DUMP_UNPACKED(BIFROST_TILER_HEAP, h, "Bifrost Tiler Heap:\n");
}

static void
pandecode_bifrost_tiler(mali_ptr gpu_va, int job_no)
{
        struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
        pan_unpack(PANDECODE_PTR(mem, gpu_va, void), BIFROST_TILER, t);

        pandecode_bifrost_tiler_heap(t.heap, job_no);

        DUMP_UNPACKED(BIFROST_TILER, t, "Bifrost Tiler:\n");
        pandecode_indent++;
        if (t.hierarchy_mask != 0xa &&
            t.hierarchy_mask != 0x14 &&
            t.hierarchy_mask != 0x28 &&
            t.hierarchy_mask != 0x50 &&
            t.hierarchy_mask != 0xa0)
                pandecode_msg("XXX: Unexpected hierarchy_mask (not 0xa, 0x14, 0x28, 0x50 or 0xa0)!\n");

        pandecode_indent--;
}

static void
pandecode_primitive_size(const void *s, bool constant)
{
        pan_unpack(s, PRIMITIVE_SIZE, ps);
        if (ps.size_array == 0x0)
                return;

        DUMP_UNPACKED(PRIMITIVE_SIZE, ps, "Primitive Size:\n")
}

static void
pandecode_vertex_compute_geometry_job(const struct MALI_JOB_HEADER *h,
                                      const struct pandecode_mapped_memory *mem,
                                      mali_ptr job, int job_no, bool is_bifrost,
                                      unsigned gpu_id)
{
        struct mali_compute_job_packed *PANDECODE_PTR_VAR(p, mem, job);
        pan_section_unpack(p, COMPUTE_JOB, DRAW, draw);
        pandecode_dcd(&draw, job_no, h->type, "", is_bifrost, gpu_id);

        pandecode_log("Vertex Job Payload:\n");
        pandecode_indent++;
        pandecode_invocation(pan_section_ptr(p, COMPUTE_JOB, INVOCATION));
        DUMP_SECTION(COMPUTE_JOB, PARAMETERS, p, "Vertex Job Parameters:\n");
        DUMP_UNPACKED(DRAW, draw, "Draw:\n");
        pandecode_indent--;
        pandecode_log("\n");
}

static void
pandecode_indexed_vertex_job(const struct MALI_JOB_HEADER *h,
                             const struct pandecode_mapped_memory *mem,
                             mali_ptr job, int job_no, bool is_bifrost,
                             unsigned gpu_id)
{
        struct mali_bifrost_indexed_vertex_job_packed *PANDECODE_PTR_VAR(p, mem, job);

        pandecode_log("Vertex:\n");
        pan_section_unpack(p, BIFROST_INDEXED_VERTEX_JOB, VERTEX_DRAW, vert_draw);
        pandecode_dcd(&vert_draw, job_no, h->type, "", is_bifrost, gpu_id);
        DUMP_UNPACKED(DRAW, vert_draw, "Vertex Draw:\n");

        pandecode_log("Fragment:\n");
        pan_section_unpack(p, BIFROST_INDEXED_VERTEX_JOB, FRAGMENT_DRAW, frag_draw);
        pandecode_dcd(&frag_draw, job_no, MALI_JOB_TYPE_FRAGMENT, "", is_bifrost, gpu_id);
        DUMP_UNPACKED(DRAW, frag_draw, "Fragment Draw:\n");

        pan_section_unpack(p, BIFROST_INDEXED_VERTEX_JOB, TILER, tiler_ptr);
        pandecode_log("Tiler Job Payload:\n");
        pandecode_indent++;
        pandecode_bifrost_tiler(tiler_ptr.address, job_no);
        pandecode_indent--;

        pandecode_invocation(pan_section_ptr(p, BIFROST_INDEXED_VERTEX_JOB, INVOCATION));
        pandecode_primitive(pan_section_ptr(p, BIFROST_INDEXED_VERTEX_JOB, PRIMITIVE));

        /* TODO: gl_PointSize on Bifrost */
        pandecode_primitive_size(pan_section_ptr(p, BIFROST_INDEXED_VERTEX_JOB, PRIMITIVE_SIZE), true);

        pan_section_unpack(p, BIFROST_INDEXED_VERTEX_JOB, PADDING, padding);
        pan_section_unpack(p, BIFROST_INDEXED_VERTEX_JOB, FRAGMENT_DRAW_PADDING, f_padding);
        pan_section_unpack(p, BIFROST_INDEXED_VERTEX_JOB, VERTEX_DRAW_PADDING, v_padding);
}

static void
pandecode_tiler_job_bfr(const struct MALI_JOB_HEADER *h,
                        const struct pandecode_mapped_memory *mem,
                        mali_ptr job, int job_no, unsigned gpu_id)
{
        struct mali_bifrost_tiler_job_packed *PANDECODE_PTR_VAR(p, mem, job);
        pan_section_unpack(p, BIFROST_TILER_JOB, DRAW, draw);
        pan_section_unpack(p, BIFROST_TILER_JOB, TILER, tiler_ptr);
        pandecode_dcd(&draw, job_no, h->type, "", true, gpu_id);

        pandecode_log("Tiler Job Payload:\n");
        pandecode_indent++;
        pandecode_bifrost_tiler(tiler_ptr.address, job_no);

        pandecode_invocation(pan_section_ptr(p, BIFROST_TILER_JOB, INVOCATION));
        pandecode_primitive(pan_section_ptr(p, BIFROST_TILER_JOB, PRIMITIVE));

        /* TODO: gl_PointSize on Bifrost */
        pandecode_primitive_size(pan_section_ptr(p, BIFROST_TILER_JOB, PRIMITIVE_SIZE), true);
        pan_section_unpack(p, BIFROST_TILER_JOB, PADDING, padding);
        DUMP_UNPACKED(DRAW, draw, "Draw:\n");
        pandecode_indent--;
        pandecode_log("\n");
}

static void
pandecode_tiler_job_mdg(const struct MALI_JOB_HEADER *h,
                        const struct pandecode_mapped_memory *mem,
                        mali_ptr job, int job_no, unsigned gpu_id)
{
        struct mali_midgard_tiler_job_packed *PANDECODE_PTR_VAR(p, mem, job);
        pan_section_unpack(p, MIDGARD_TILER_JOB, DRAW, draw);
        pandecode_dcd(&draw, job_no, h->type, "", false, gpu_id);

        pandecode_log("Tiler Job Payload:\n");
        pandecode_indent++;
        pandecode_invocation(pan_section_ptr(p, MIDGARD_TILER_JOB, INVOCATION));
        pandecode_primitive(pan_section_ptr(p, MIDGARD_TILER_JOB, PRIMITIVE));
        DUMP_UNPACKED(DRAW, draw, "Draw:\n");

        pan_section_unpack(p, MIDGARD_TILER_JOB, PRIMITIVE, primitive);
        pandecode_primitive_size(pan_section_ptr(p, MIDGARD_TILER_JOB, PRIMITIVE_SIZE),
                                 primitive.point_size_array_format == MALI_POINT_SIZE_ARRAY_FORMAT_NONE);
        pandecode_indent--;
        pandecode_log("\n");
}

static void
pandecode_fragment_job(const struct pandecode_mapped_memory *mem,
                       mali_ptr job, int job_no,
                       bool is_bifrost, unsigned gpu_id)
{
        struct mali_fragment_job_packed *PANDECODE_PTR_VAR(p, mem, job);
        pan_section_unpack(p, FRAGMENT_JOB, PAYLOAD, s);

        bool is_mfbd = s.framebuffer & MALI_FBD_TAG_IS_MFBD;

        if (!is_mfbd && is_bifrost)
                pandecode_msg("XXX: Bifrost fragment must use MFBD\n");

        struct pandecode_fbd info;

        if (is_mfbd)
                info = pandecode_mfbd_bfr(s.framebuffer & ~MALI_FBD_TAG_MASK, job_no,
                                          true, is_bifrost, gpu_id);
        else
                info = pandecode_sfbd(s.framebuffer & ~MALI_FBD_TAG_MASK, job_no,
                                      true, gpu_id);

        /* Compute the tag for the tagged pointer. This contains the type of
         * FBD (MFBD/SFBD), and in the case of an MFBD, information about which
         * additional structures follow the MFBD header (an extra payload or
         * not, as well as a count of render targets) */

        unsigned expected_tag = is_mfbd ? MALI_FBD_TAG_IS_MFBD : 0;

        if (is_mfbd) {
                if (info.has_extra)
                        expected_tag |= MALI_FBD_TAG_HAS_ZS_RT;

                expected_tag |= (MALI_POSITIVE(info.rt_count) << 2);
        }

        DUMP_UNPACKED(FRAGMENT_JOB_PAYLOAD, s, "Fragment Job Payload:\n");

        /* The FBD is a tagged pointer */

        unsigned tag = (s.framebuffer & MALI_FBD_TAG_MASK);

        if (tag != expected_tag)
                pandecode_msg("XXX: expected FBD tag %X but got %X\n", expected_tag, tag);

        pandecode_log("\n");
}

static void
pandecode_write_value_job(const struct pandecode_mapped_memory *mem,
                          mali_ptr job, int job_no)
{
        struct mali_write_value_job_packed *PANDECODE_PTR_VAR(p, mem, job);
        pan_section_unpack(p, WRITE_VALUE_JOB, PAYLOAD, u);
        DUMP_SECTION(WRITE_VALUE_JOB, PAYLOAD, p, "Write Value Payload:\n");
        pandecode_log("\n");
}

static void
pandecode_cache_flush_job(const struct pandecode_mapped_memory *mem,
                          mali_ptr job, int job_no)
{
        struct mali_cache_flush_job_packed *PANDECODE_PTR_VAR(p, mem, job);
        pan_section_unpack(p, CACHE_FLUSH_JOB, PAYLOAD, u);
        DUMP_SECTION(CACHE_FLUSH_JOB, PAYLOAD, p, "Cache Flush Payload:\n");
        pandecode_log("\n");
}

/* Entrypoint to start tracing. jc_gpu_va is the GPU address for the first job
 * in the chain; later jobs are found by walking the chain. bifrost indicates
 * whether the chain was generated for a Bifrost GPU. gpu_id is the
 * finer-grained model ID (at some point, we might wish to combine it with the
 * bifrost parameter) because some details are model-specific even within a
 * particular architecture. */

void
pandecode_jc(mali_ptr jc_gpu_va, bool bifrost, unsigned gpu_id)
{
        pandecode_dump_file_open();

        unsigned job_descriptor_number = 0;
        mali_ptr next_job = 0;

        do {
                struct pandecode_mapped_memory *mem =
                        pandecode_find_mapped_gpu_mem_containing(jc_gpu_va);

                pan_unpack(PANDECODE_PTR(mem, jc_gpu_va, struct mali_job_header_packed),
                           JOB_HEADER, h);
                next_job = h.next;

                int job_no = job_descriptor_number++;

                DUMP_UNPACKED(JOB_HEADER, h, "Job Header:\n");
                pandecode_log("\n");

                switch (h.type) {
                case MALI_JOB_TYPE_WRITE_VALUE:
                        pandecode_write_value_job(mem, jc_gpu_va, job_no);
                        break;

                case MALI_JOB_TYPE_CACHE_FLUSH:
                        pandecode_cache_flush_job(mem, jc_gpu_va, job_no);
                        break;

                case MALI_JOB_TYPE_TILER:
                        if (bifrost)
                                pandecode_tiler_job_bfr(&h, mem, jc_gpu_va, job_no, gpu_id);
                        else
                                pandecode_tiler_job_mdg(&h, mem, jc_gpu_va, job_no, gpu_id);
                        break;

                case MALI_JOB_TYPE_VERTEX:
                case MALI_JOB_TYPE_COMPUTE:
                        pandecode_vertex_compute_geometry_job(&h, mem, jc_gpu_va, job_no,
                                                              bifrost, gpu_id);
                        break;

                case MALI_JOB_TYPE_INDEXED_VERTEX:
                        pandecode_indexed_vertex_job(&h, mem, jc_gpu_va, job_no,
                                                     bifrost, gpu_id);
                        break;

                case MALI_JOB_TYPE_FRAGMENT:
                        pandecode_fragment_job(mem, jc_gpu_va, job_no, bifrost, gpu_id);
                        break;

                default:
                        break;
                }
        } while ((jc_gpu_va = next_job));

        fflush(pandecode_dump_stream);
        pandecode_map_read_write();
}

void
pandecode_abort_on_fault(mali_ptr jc_gpu_va)
{
        mali_ptr next_job = 0;

        do {
                struct pandecode_mapped_memory *mem =
                        pandecode_find_mapped_gpu_mem_containing(jc_gpu_va);

                pan_unpack(PANDECODE_PTR(mem, jc_gpu_va, struct mali_job_header_packed),
                           JOB_HEADER, h);
                next_job = h.next;

                /* Ensure the job is marked COMPLETE */
                if (h.exception_status != 0x1) {
                        fprintf(stderr, "Incomplete job or timeout\n");
                        abort();
                }
        } while ((jc_gpu_va = next_job));

        pandecode_map_read_write();
}