GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/panfrost/midgard/midgard.h
/* Author(s):
 * Connor Abbott
 * Alyssa Rosenzweig
 *
 * Copyright (c) 2013 Connor Abbott ([email protected])
 * Copyright (c) 2018 Alyssa Rosenzweig ([email protected])
 * Copyright (C) 2019-2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef __midgard_h__
#define __midgard_h__

#include <stdint.h>
#include <stdbool.h>

#define MIDGARD_DBG_MSGS     0x0001
#define MIDGARD_DBG_SHADERS  0x0002
#define MIDGARD_DBG_SHADERDB 0x0004
#define MIDGARD_DBG_INORDER  0x0008
#define MIDGARD_DBG_VERBOSE  0x0010
#define MIDGARD_DBG_INTERNAL 0x0020

extern int midgard_debug;

typedef enum {
        midgard_word_type_alu,
        midgard_word_type_load_store,
        midgard_word_type_texture,
        midgard_word_type_unknown
} midgard_word_type;

typedef enum {
        midgard_alu_vmul,
        midgard_alu_sadd,
        midgard_alu_smul,
        midgard_alu_vadd,
        midgard_alu_lut
} midgard_alu;

enum {
        TAG_INVALID = 0x0,
        TAG_BREAK = 0x1,
        TAG_TEXTURE_4_VTX = 0x2,
        TAG_TEXTURE_4 = 0x3,
        TAG_TEXTURE_4_BARRIER = 0x4,
        TAG_LOAD_STORE_4 = 0x5,
        TAG_UNKNOWN_1 = 0x6,
        TAG_UNKNOWN_2 = 0x7,
        TAG_ALU_4 = 0x8,
        TAG_ALU_8 = 0x9,
        TAG_ALU_12 = 0xA,
        TAG_ALU_16 = 0xB,
        TAG_ALU_4_WRITEOUT = 0xC,
        TAG_ALU_8_WRITEOUT = 0xD,
        TAG_ALU_12_WRITEOUT = 0xE,
        TAG_ALU_16_WRITEOUT = 0xF
};

/*
 * ALU words
 */

typedef enum {
        midgard_alu_op_fadd = 0x10, /* round to even */
        midgard_alu_op_fadd_rtz = 0x11,
        midgard_alu_op_fadd_rtn = 0x12,
        midgard_alu_op_fadd_rtp = 0x13,
        midgard_alu_op_fmul = 0x14, /* round to even */
        midgard_alu_op_fmul_rtz = 0x15,
        midgard_alu_op_fmul_rtn = 0x16,
        midgard_alu_op_fmul_rtp = 0x17,

        midgard_alu_op_fmin = 0x28, /* if an operand is NaN, propagate the other (see sketch after this enum) */
        midgard_alu_op_fmin_nan = 0x29, /* if an operand is NaN, propagate it */
        midgard_alu_op_fabsmin = 0x2A, /* min(abs(a,b)) */
        midgard_alu_op_fabsmin_nan = 0x2B, /* min_nan(abs(a,b)) */
        midgard_alu_op_fmax = 0x2C, /* if an operand is NaN, propagate the other */
        midgard_alu_op_fmax_nan = 0x2D, /* if an operand is NaN, propagate it */
        midgard_alu_op_fabsmax = 0x2E, /* max(abs(a,b)) */
        midgard_alu_op_fabsmax_nan = 0x2F, /* max_nan(abs(a,b)) */

        midgard_alu_op_fmov = 0x30, /* fmov_rte */
        midgard_alu_op_fmov_rtz = 0x31,
        midgard_alu_op_fmov_rtn = 0x32,
        midgard_alu_op_fmov_rtp = 0x33,
        midgard_alu_op_froundeven = 0x34,
        midgard_alu_op_ftrunc = 0x35,
        midgard_alu_op_ffloor = 0x36,
        midgard_alu_op_fceil = 0x37,
        midgard_alu_op_ffma = 0x38, /* rte */
        midgard_alu_op_ffma_rtz = 0x39,
        midgard_alu_op_ffma_rtn = 0x3A,
        midgard_alu_op_ffma_rtp = 0x3B,
        midgard_alu_op_fdot3 = 0x3C,
        midgard_alu_op_fdot3r = 0x3D,
        midgard_alu_op_fdot4 = 0x3E,
        midgard_alu_op_freduce = 0x3F,

        midgard_alu_op_iadd = 0x40,
        midgard_alu_op_ishladd = 0x41, /* (a<<1) + b */
        midgard_alu_op_isub = 0x46,
        midgard_alu_op_ishlsub = 0x47, /* (a<<1) - b */
        midgard_alu_op_iaddsat = 0x48,
        midgard_alu_op_uaddsat = 0x49,
        midgard_alu_op_isubsat = 0x4E,
        midgard_alu_op_usubsat = 0x4F,

        midgard_alu_op_imul = 0x58,
        /* Multiplies two ints and stores the result in the next larger data size. */
        midgard_alu_op_iwmul = 0x59, /* sint * sint = sint */
        midgard_alu_op_uwmul = 0x5A, /* uint * uint = uint */
        midgard_alu_op_iuwmul = 0x5B, /* sint * uint = sint */

        midgard_alu_op_imin = 0x60,
        midgard_alu_op_umin = 0x61,
        midgard_alu_op_imax = 0x62,
        midgard_alu_op_umax = 0x63,
        midgard_alu_op_iavg = 0x64,
        midgard_alu_op_uavg = 0x65,
        midgard_alu_op_iravg = 0x66,
        midgard_alu_op_uravg = 0x67,
        midgard_alu_op_iasr = 0x68,
        midgard_alu_op_ilsr = 0x69,
        midgard_alu_op_ishlsat = 0x6C,
        midgard_alu_op_ushlsat = 0x6D,
        midgard_alu_op_ishl = 0x6E,

        midgard_alu_op_iand = 0x70,
        midgard_alu_op_ior = 0x71,
        midgard_alu_op_inand = 0x72, /* ~(a & b), for inot let a = b */
        midgard_alu_op_inor = 0x73, /* ~(a | b) */
        midgard_alu_op_iandnot = 0x74, /* (a & ~b), used for not/b2f */
        midgard_alu_op_iornot = 0x75, /* (a | ~b) */
        midgard_alu_op_ixor = 0x76,
        midgard_alu_op_inxor = 0x77, /* ~(a ^ b) */
        midgard_alu_op_iclz = 0x78, /* Number of zeroes on left */
        midgard_alu_op_ipopcnt = 0x7A, /* Population count */
        midgard_alu_op_imov = 0x7B,
        midgard_alu_op_iabsdiff = 0x7C,
        midgard_alu_op_uabsdiff = 0x7D,
        midgard_alu_op_ichoose = 0x7E, /* vector, component number - dupe for shuffle() */

        midgard_alu_op_feq = 0x80,
        midgard_alu_op_fne = 0x81,
        midgard_alu_op_flt = 0x82,
        midgard_alu_op_fle = 0x83,
        midgard_alu_op_fball_eq = 0x88,
        midgard_alu_op_fball_neq = 0x89,
        midgard_alu_op_fball_lt = 0x8A, /* all(lessThan(.., ..)) */
        midgard_alu_op_fball_lte = 0x8B, /* all(lessThanEqual(.., ..)) */

        midgard_alu_op_fbany_eq = 0x90,
        midgard_alu_op_fbany_neq = 0x91,
        midgard_alu_op_fbany_lt = 0x92, /* any(lessThan(.., ..)) */
        midgard_alu_op_fbany_lte = 0x93, /* any(lessThanEqual(.., ..)) */

        midgard_alu_op_f2i_rte = 0x98,
        midgard_alu_op_f2i_rtz = 0x99,
        midgard_alu_op_f2i_rtn = 0x9A,
        midgard_alu_op_f2i_rtp = 0x9B,
        midgard_alu_op_f2u_rte = 0x9C,
        midgard_alu_op_f2u_rtz = 0x9D,
        midgard_alu_op_f2u_rtn = 0x9E,
        midgard_alu_op_f2u_rtp = 0x9F,

        midgard_alu_op_ieq = 0xA0,
        midgard_alu_op_ine = 0xA1,
        midgard_alu_op_ult = 0xA2,
        midgard_alu_op_ule = 0xA3,
        midgard_alu_op_ilt = 0xA4,
        midgard_alu_op_ile = 0xA5,
        midgard_alu_op_iball_eq = 0xA8,
        midgard_alu_op_iball_neq = 0xA9,
        midgard_alu_op_uball_lt = 0xAA,
        midgard_alu_op_uball_lte = 0xAB,
        midgard_alu_op_iball_lt = 0xAC,
        midgard_alu_op_iball_lte = 0xAD,

        midgard_alu_op_ibany_eq = 0xB0,
        midgard_alu_op_ibany_neq = 0xB1,
        midgard_alu_op_ubany_lt = 0xB2,
        midgard_alu_op_ubany_lte = 0xB3,
        midgard_alu_op_ibany_lt = 0xB4, /* any(lessThan(.., ..)) */
        midgard_alu_op_ibany_lte = 0xB5, /* any(lessThanEqual(.., ..)) */
        midgard_alu_op_i2f_rte = 0xB8,
        midgard_alu_op_i2f_rtz = 0xB9,
        midgard_alu_op_i2f_rtn = 0xBA,
        midgard_alu_op_i2f_rtp = 0xBB,
        midgard_alu_op_u2f_rte = 0xBC,
        midgard_alu_op_u2f_rtz = 0xBD,
        midgard_alu_op_u2f_rtn = 0xBE,
        midgard_alu_op_u2f_rtp = 0xBF,

        /* All csel* instructions use as a condition the output of the previous
         * vector or scalar unit, thus they must run on the second pipeline
         * stage and be scheduled to the same bundle as the opcode that they
         * use as a condition. */
        midgard_alu_op_icsel_v = 0xC0,
        midgard_alu_op_icsel = 0xC1,
        midgard_alu_op_fcsel_v = 0xC4,
        midgard_alu_op_fcsel = 0xC5,
        midgard_alu_op_froundaway = 0xC6, /* round to nearest away */

        midgard_alu_op_fatan2_pt2 = 0xE8,
        midgard_alu_op_fpow_pt1 = 0xEC,
        midgard_alu_op_fpown_pt1 = 0xED,
        midgard_alu_op_fpowr_pt1 = 0xEE,

        midgard_alu_op_frcp = 0xF0,
        midgard_alu_op_frsqrt = 0xF2,
        midgard_alu_op_fsqrt = 0xF3,
        midgard_alu_op_fexp2 = 0xF4,
        midgard_alu_op_flog2 = 0xF5,
        midgard_alu_op_fsinpi = 0xF6, /* sin(pi * x) */
        midgard_alu_op_fcospi = 0xF7, /* cos(pi * x) */
        midgard_alu_op_fatan2_pt1 = 0xF9,
} midgard_alu_op;
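
/* Illustrative only, not part of the ISA definition: reference semantics for
 * the fmin/fmin_nan NaN rules noted in the enum above, assuming IEEE-754
 * floats (where NaN != NaN). Scalar sketch; the hardware operates
 * per-component. fmax/fmax_nan follow the same pattern with > instead of <. */
static inline float
midgard_ref_fmin(float a, float b)
{
        /* fmin: if one operand is NaN, propagate the *other* operand */
        if (a != a)
                return b;
        if (b != b)
                return a;
        return a < b ? a : b;
}

static inline float
midgard_ref_fmin_nan(float a, float b)
{
        /* fmin_nan: if one operand is NaN, propagate the NaN itself */
        if (a != a)
                return a;
        if (b != b)
                return b;
        return a < b ? a : b;
}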

typedef enum {
        midgard_outmod_none = 0,
        midgard_outmod_clamp_0_inf = 1, /* max(x, 0.0), NaNs become +0.0 */
        midgard_outmod_clamp_m1_1 = 2, /* clamp(x, -1.0, 1.0), NaNs become -1.0 */
        midgard_outmod_clamp_0_1 = 3 /* clamp(x, 0.0, 1.0), NaNs become +0.0 */
} midgard_outmod_float;
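
/* Illustrative only, not part of the ISA definition: reference semantics for
 * the float output modifiers above, following the NaN notes (NaNs become +0.0
 * for the clamps based at 0, and -1.0 for clamp_m1_1). Scalar sketch. */
static inline float
midgard_ref_apply_outmod_float(float x, midgard_outmod_float mod)
{
        bool is_nan = (x != x);

        switch (mod) {
        case midgard_outmod_clamp_0_inf:
                return (is_nan || x < 0.0f) ? 0.0f : x;
        case midgard_outmod_clamp_m1_1:
                if (is_nan)
                        return -1.0f;
                return x < -1.0f ? -1.0f : (x > 1.0f ? 1.0f : x);
        case midgard_outmod_clamp_0_1:
                if (is_nan)
                        return 0.0f;
                return x < 0.0f ? 0.0f : (x > 1.0f ? 1.0f : x);
        case midgard_outmod_none:
        default:
                return x;
        }
}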

/* These are applied to the resulting value that's going to be stored in the
 * dest reg. This should be set to midgard_outmod_keeplo when shrink_mode is
 * midgard_shrink_mode_none. */
typedef enum {
        midgard_outmod_ssat = 0,
        midgard_outmod_usat = 1,
        midgard_outmod_keeplo = 2, /* Keep low half */
        midgard_outmod_keephi = 3, /* Keep high half */
} midgard_outmod_int;

typedef enum {
        midgard_reg_mode_8 = 0,
        midgard_reg_mode_16 = 1,
        midgard_reg_mode_32 = 2,
        midgard_reg_mode_64 = 3
} midgard_reg_mode;

typedef enum {
        midgard_shrink_mode_lower = 0,
        midgard_shrink_mode_upper = 1,
        midgard_shrink_mode_none = 2
} midgard_shrink_mode;

/* Only used if midgard_src_expand_mode is set to one of midgard_src_expand_*. */
typedef enum {
        midgard_int_sign_extend = 0,
        midgard_int_zero_extend = 1,
        midgard_int_replicate = 2,
        midgard_int_left_shift = 3
} midgard_int_mod;

/* Unlike midgard_int_mod, float modifiers are applied after the expansion
 * happens, so they don't depend on midgard_src_expand_mode. */
#define MIDGARD_FLOAT_MOD_ABS (1 << 0)
#define MIDGARD_FLOAT_MOD_NEG (1 << 1)

/* The expand options depend on both midgard_int_mod and midgard_reg_mode. For
 * example, a vec4 with midgard_int_sign_extend and midgard_src_expand_low is
 * treated as a vec8 and each 16-bit element from the low 64-bits is then sign
 * extended, resulting in a vec4 where each 32-bit element corresponds to a
 * 16-bit element from the low 64-bits of the input vector. */
typedef enum {
        midgard_src_passthrough = 0,
        midgard_src_rep_low = 1, /* replicate lower 64 bits to higher 64 bits */
        midgard_src_rep_high = 2, /* replicate higher 64 bits to lower 64 bits */
        midgard_src_swap = 3, /* swap lower 64 bits with higher 64 bits */
        midgard_src_expand_low = 4, /* expand low 64 bits */
        midgard_src_expand_high = 5, /* expand high 64 bits */
        midgard_src_expand_low_swap = 6, /* expand low 64 bits, then swap */
        midgard_src_expand_high_swap = 7, /* expand high 64 bits, then swap */
} midgard_src_expand_mode;

#define INPUT_EXPANDS(a) \
        (a >= midgard_src_expand_low && a <= midgard_src_expand_high_swap)

#define INPUT_SWAPS(a) \
        (a == midgard_src_swap || a >= midgard_src_expand_low_swap)
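
/* Illustrative only (hypothetical helper, not part of the ISA definition):
 * per the comment above, an expanding source is read at half the
 * instruction's reg_mode width and then widened, so its effective source
 * element size in bits is: */
static inline unsigned
midgard_ref_src_elem_bits(midgard_reg_mode reg_mode,
                          midgard_src_expand_mode expand_mode)
{
        unsigned bits = 8u << reg_mode; /* 8/16/32/64 per midgard_reg_mode */
        return INPUT_EXPANDS(expand_mode) ? bits / 2 : bits;
}

/* e.g. midgard_reg_mode_32 + midgard_src_expand_low reads 16-bit elements
 * from the low 64 bits and widens each to 32 bits per midgard_int_mod. */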

typedef struct
__attribute__((__packed__))
{
        /* Either midgard_int_mod or from MIDGARD_FLOAT_MOD_*, depending on
         * the type of op */
        unsigned mod : 2;
        midgard_src_expand_mode expand_mode : 3;
        unsigned swizzle : 8;
}
midgard_vector_alu_src;

typedef struct
__attribute__((__packed__))
{
        midgard_alu_op op : 8;
        midgard_reg_mode reg_mode : 2;
        unsigned src1 : 13;
        unsigned src2 : 13;
        midgard_shrink_mode shrink_mode : 2;
        unsigned outmod : 2;
        unsigned mask : 8;
}
midgard_vector_alu;

typedef struct
__attribute__((__packed__))
{
        unsigned mod : 2;
        bool full : 1; /* 0 = 16-bit, 1 = 32-bit */
        unsigned component : 3;
}
midgard_scalar_alu_src;

typedef struct
__attribute__((__packed__))
{
        midgard_alu_op op : 8;
        unsigned src1 : 6;
        /* last 5 bits are used when src2 is an immediate */
        unsigned src2 : 11;
        unsigned unknown : 1;
        unsigned outmod : 2;
        bool output_full : 1;
        unsigned output_component : 3;
}
midgard_scalar_alu;

typedef struct
__attribute__((__packed__))
{
        unsigned src1_reg : 5;
        unsigned src2_reg : 5;
        unsigned out_reg : 5;
        bool src2_imm : 1;
}
midgard_reg_info;

/* In addition to conditional branches and jumps (unconditional branches),
 * Midgard implements a bit of fixed-function functionality used in fragment
 * shaders via specially crafted branches. These have special branch opcodes,
 * which perform a fixed-function operation and/or use the results of a
 * fixed-function operation as the branch condition. */

typedef enum {
        /* Regular branches */
        midgard_jmp_writeout_op_branch_uncond = 1,
        midgard_jmp_writeout_op_branch_cond = 2,

        /* In a fragment shader, execute a discard_if instruction, with the
         * corresponding condition code. Terminates the shader, so generally
         * set the branch target to out of the shader */
        midgard_jmp_writeout_op_discard = 4,

        /* Branch if the tilebuffer is not yet ready. At the beginning of a
         * fragment shader that reads from the tile buffer, for instance via
         * ARM_shader_framebuffer_fetch or EXT_pixel_local_storage, this branch
         * operation should be used as a loop. An instruction like
         * "br.tilebuffer.always -1" does the trick, corresponding to
         * "while (!is_tilebuffer_ready);" */
        midgard_jmp_writeout_op_tilebuffer_pending = 6,

        /* In a fragment shader, try to write out the value pushed to r0 to the
         * tilebuffer, subject to unknown state in r1.z and r1.w. If this
         * succeeds, the shader terminates. If it fails, it branches to the
         * specified branch target. Generally, this should be used in a loop to
         * itself, acting as "do { write(r0); } while(!write_successful);" */
        midgard_jmp_writeout_op_writeout = 7,
} midgard_jmp_writeout_op;

typedef enum {
        midgard_condition_write0 = 0,

        /* These condition codes denote a conditional branch on FALSE and on
         * TRUE respectively */
        midgard_condition_false = 1,
        midgard_condition_true = 2,

        /* This condition code always branches. For a pure branch, the
         * unconditional branch coding should be used instead, but for
         * fixed-function branch opcodes, this is still useful */
        midgard_condition_always = 3,
} midgard_condition;

typedef struct
__attribute__((__packed__))
{
        midgard_jmp_writeout_op op : 3; /* == branch_uncond */
        unsigned dest_tag : 4; /* tag of branch destination */
        unsigned unknown : 2;
        int offset : 7;
}
midgard_branch_uncond;

typedef struct
__attribute__((__packed__))
{
        midgard_jmp_writeout_op op : 3; /* == branch_cond */
        unsigned dest_tag : 4; /* tag of branch destination */
        int offset : 7;
        midgard_condition cond : 2;
}
midgard_branch_cond;

typedef struct
__attribute__((__packed__))
{
        midgard_jmp_writeout_op op : 3; /* == branch_cond */
        unsigned dest_tag : 4; /* tag of branch destination */
        unsigned unknown : 2;
        signed offset : 23;

        /* Extended branches permit inputting up to 4 conditions loaded into
         * r31 (two in r31.w and two in r31.x). In the most general case, we
         * specify a function f(A, B, C, D) mapping 4 1-bit conditions to a
         * single 1-bit branch criterion. Note that the domain of f has 2^4
         * elements, each mapping to 1 bit of output, so we can trivially
         * construct a Gödel numbering of f as a (2^4) = 16-bit integer. This
         * 16-bit integer serves as a lookup table to compute f, subject to
         * some swaps for ordering.
         *
         * Interestingly, the standard 2-bit condition codes are also a LUT
         * with the same format (2^1 bits), but it's usually easier to use
         * enums. */

        unsigned cond : 16;
}
midgard_branch_extended;
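
/* Illustrative only: a sketch of building the 16-bit condition LUT described
 * above from an arbitrary f(A, B, C, D). Bit i of the LUT is taken here to be
 * the value of f on the condition nibble with value i; the "swaps for
 * ordering" mentioned above are hardware-specific and not modelled, so the
 * exact bit/argument ordering is an assumption. */
static inline uint16_t
midgard_ref_branch_lut(bool (*f)(bool a, bool b, bool c, bool d))
{
        uint16_t lut = 0;

        for (unsigned i = 0; i < 16; ++i) {
                bool a = (i >> 0) & 1;
                bool b = (i >> 1) & 1;
                bool c = (i >> 2) & 1;
                bool d = (i >> 3) & 1;

                if (f(a, b, c, d))
                        lut |= (uint16_t)(1u << i);
        }

        return lut;
}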

typedef struct
__attribute__((__packed__))
{
        midgard_jmp_writeout_op op : 3; /* == writeout */
        unsigned unknown : 13;
}
midgard_writeout;

/*
 * Load/store words
 */

typedef enum {
        midgard_op_ld_st_noop = 0x03,

        /* Unpacks a colour from a native format to <format> */
        midgard_op_unpack_colour_f32 = 0x04,
        midgard_op_unpack_colour_f16 = 0x05,
        midgard_op_unpack_colour_u32 = 0x06,
        midgard_op_unpack_colour_s32 = 0x07,

        /* Packs a colour from <format> to a native format */
        midgard_op_pack_colour_f32 = 0x08,
        midgard_op_pack_colour_f16 = 0x09,
        midgard_op_pack_colour_u32 = 0x0A,
        midgard_op_pack_colour_s32 = 0x0B,

        /* Computes the effective address of a mem address expression */
        midgard_op_lea = 0x0C,

        /* Converts image coordinates into a mem address */
        midgard_op_lea_image = 0x0D,

        /* Unclear why this is on the L/S unit, but moves fp32 cube map
         * coordinates in r27 to its cube map texture coordinate destination
         * (e.g. r29). */

        midgard_op_ld_cubemap_coords = 0x0E,

        /* A mov between registers that the ldst pipeline can access */
        midgard_op_ldst_mov = 0x10,

        /* The L/S unit can do perspective division a clock faster than the ALU
         * if you're lucky. Put the vec4 in r27, and call with 0x24 as the
         * unknown state; the output will be <x/w, y/w, z/w, 1>. Replace w with
         * z for the z version */
        midgard_op_ldst_perspective_div_y = 0x11,
        midgard_op_ldst_perspective_div_z = 0x12,
        midgard_op_ldst_perspective_div_w = 0x13,

        /* val in r27.y, address embedded, outputs result to argument. Invert
         * val for sub. Let val = +-1 for inc/dec. */
        midgard_op_atomic_add = 0x40,
        midgard_op_atomic_add64 = 0x41,
        midgard_op_atomic_add_be = 0x42,
        midgard_op_atomic_add64_be = 0x43,

        midgard_op_atomic_and = 0x44,
        midgard_op_atomic_and64 = 0x45,
        midgard_op_atomic_and_be = 0x46,
        midgard_op_atomic_and64_be = 0x47,
        midgard_op_atomic_or = 0x48,
        midgard_op_atomic_or64 = 0x49,
        midgard_op_atomic_or_be = 0x4A,
        midgard_op_atomic_or64_be = 0x4B,
        midgard_op_atomic_xor = 0x4C,
        midgard_op_atomic_xor64 = 0x4D,
        midgard_op_atomic_xor_be = 0x4E,
        midgard_op_atomic_xor64_be = 0x4F,

        midgard_op_atomic_imin = 0x50,
        midgard_op_atomic_imin64 = 0x51,
        midgard_op_atomic_imin_be = 0x52,
        midgard_op_atomic_imin64_be = 0x53,
        midgard_op_atomic_umin = 0x54,
        midgard_op_atomic_umin64 = 0x55,
        midgard_op_atomic_umin_be = 0x56,
        midgard_op_atomic_umin64_be = 0x57,
        midgard_op_atomic_imax = 0x58,
        midgard_op_atomic_imax64 = 0x59,
        midgard_op_atomic_imax_be = 0x5A,
        midgard_op_atomic_imax64_be = 0x5B,
        midgard_op_atomic_umax = 0x5C,
        midgard_op_atomic_umax64 = 0x5D,
        midgard_op_atomic_umax_be = 0x5E,
        midgard_op_atomic_umax64_be = 0x5F,

        midgard_op_atomic_xchg = 0x60,
        midgard_op_atomic_xchg64 = 0x61,
        midgard_op_atomic_xchg_be = 0x62,
        midgard_op_atomic_xchg64_be = 0x63,

        midgard_op_atomic_cmpxchg = 0x64,
        midgard_op_atomic_cmpxchg64 = 0x65,
        midgard_op_atomic_cmpxchg_be = 0x66,
        midgard_op_atomic_cmpxchg64_be = 0x67,

        /* Used for compute shader's __global arguments, __local
         * variables (or for register spilling) */

        midgard_op_ld_u8 = 0x80, /* zero extends */
        midgard_op_ld_i8 = 0x81, /* sign extends */
        midgard_op_ld_u16 = 0x84, /* zero extends */
        midgard_op_ld_i16 = 0x85, /* sign extends */
        midgard_op_ld_u16_be = 0x86, /* zero extends, big endian */
        midgard_op_ld_i16_be = 0x87, /* sign extends, big endian */
        midgard_op_ld_32 = 0x88, /* short2, int, float */
        midgard_op_ld_32_bswap2 = 0x89, /* 16-bit big endian vector */
        midgard_op_ld_32_bswap4 = 0x8A, /* 32-bit big endian scalar */
        midgard_op_ld_64 = 0x8C, /* int2, float2, long */
        midgard_op_ld_64_bswap2 = 0x8D, /* 16-bit big endian vector */
        midgard_op_ld_64_bswap4 = 0x8E, /* 32-bit big endian vector */
        midgard_op_ld_64_bswap8 = 0x8F, /* 64-bit big endian scalar */
        midgard_op_ld_128 = 0x90, /* float4, long2 */
        midgard_op_ld_128_bswap2 = 0x91, /* 16-bit big endian vector */
        midgard_op_ld_128_bswap4 = 0x92, /* 32-bit big endian vector */
        midgard_op_ld_128_bswap8 = 0x93, /* 64-bit big endian vector */

        midgard_op_ld_attr_32 = 0x94,
        midgard_op_ld_attr_16 = 0x95,
        midgard_op_ld_attr_32u = 0x96,
        midgard_op_ld_attr_32i = 0x97,
        midgard_op_ld_vary_32 = 0x98,
        midgard_op_ld_vary_16 = 0x99,
        midgard_op_ld_vary_32u = 0x9A,
        midgard_op_ld_vary_32i = 0x9B,

        /* This instruction behaves differently depending on whether the GPU
         * is a v4 or a newer GPU. The main difference hinges on which values
         * of the second argument are valid for each GPU.
         * TODO: properly document and decode each possible value for the
         * second argument. */
        midgard_op_ld_special_32f = 0x9C,
        midgard_op_ld_special_16f = 0x9D,
        midgard_op_ld_special_32u = 0x9E,
        midgard_op_ld_special_32i = 0x9F,

        /* The distinction between these ops is the alignment
         * requirement / accompanying shift. Thus, the offset to
         * ld_ubo_128 is in 16-byte units and can load 128-bit. The
         * offset to ld_ubo_64 is in 8-byte units; ld_ubo_32 in 4-byte
         * units. (See the illustrative sketch after this enum.) */
        midgard_op_ld_ubo_u8 = 0xA0, /* theoretical */
        midgard_op_ld_ubo_i8 = 0xA1, /* theoretical */
        midgard_op_ld_ubo_u16 = 0xA4, /* theoretical */
        midgard_op_ld_ubo_i16 = 0xA5, /* theoretical */
        midgard_op_ld_ubo_u16_be = 0xA6, /* theoretical */
        midgard_op_ld_ubo_i16_be = 0xA7, /* theoretical */
        midgard_op_ld_ubo_32 = 0xA8,
        midgard_op_ld_ubo_32_bswap2 = 0xA9,
        midgard_op_ld_ubo_32_bswap4 = 0xAA,
        midgard_op_ld_ubo_64 = 0xAC,
        midgard_op_ld_ubo_64_bswap2 = 0xAD,
        midgard_op_ld_ubo_64_bswap4 = 0xAE,
        midgard_op_ld_ubo_64_bswap8 = 0xAF,
        midgard_op_ld_ubo_128 = 0xB0,
        midgard_op_ld_ubo_128_bswap2 = 0xB1,
        midgard_op_ld_ubo_128_bswap4 = 0xB2,
        midgard_op_ld_ubo_128_bswap8 = 0xB3,

        midgard_op_ld_image_32f = 0xB4,
        midgard_op_ld_image_16f = 0xB5,
        midgard_op_ld_image_32u = 0xB6,
        midgard_op_ld_image_32i = 0xB7,

        /* Only works on v5 or newer.
         * Older cards must use ld_special with tilebuffer selectors. */
        midgard_op_ld_tilebuffer_32f = 0xB8,
        midgard_op_ld_tilebuffer_16f = 0xB9,
        midgard_op_ld_tilebuffer_raw = 0xBA,

        midgard_op_st_u8 = 0xC0, /* zero extends */
        midgard_op_st_i8 = 0xC1, /* sign extends */
        midgard_op_st_u16 = 0xC4, /* zero extends */
        midgard_op_st_i16 = 0xC5, /* sign extends */
        midgard_op_st_u16_be = 0xC6, /* zero extends, big endian */
        midgard_op_st_i16_be = 0xC7, /* sign extends, big endian */
        midgard_op_st_32 = 0xC8, /* short2, int, float */
        midgard_op_st_32_bswap2 = 0xC9, /* 16-bit big endian vector */
        midgard_op_st_32_bswap4 = 0xCA, /* 32-bit big endian scalar */
        midgard_op_st_64 = 0xCC, /* int2, float2, long */
        midgard_op_st_64_bswap2 = 0xCD, /* 16-bit big endian vector */
        midgard_op_st_64_bswap4 = 0xCE, /* 32-bit big endian vector */
        midgard_op_st_64_bswap8 = 0xCF, /* 64-bit big endian scalar */
        midgard_op_st_128 = 0xD0, /* float4, long2 */
        midgard_op_st_128_bswap2 = 0xD1, /* 16-bit big endian vector */
        midgard_op_st_128_bswap4 = 0xD2, /* 32-bit big endian vector */
        midgard_op_st_128_bswap8 = 0xD3, /* 64-bit big endian vector */

        midgard_op_st_vary_32 = 0xD4,
        midgard_op_st_vary_16 = 0xD5,
        midgard_op_st_vary_32u = 0xD6,
        midgard_op_st_vary_32i = 0xD7,

        /* Value to st in r27, location r26.w as short2 */
        midgard_op_st_image_32f = 0xD8,
        midgard_op_st_image_16f = 0xD9,
        midgard_op_st_image_32u = 0xDA,
        midgard_op_st_image_32i = 0xDB,

        midgard_op_st_special_32f = 0xDC,
        midgard_op_st_special_16f = 0xDD,
        midgard_op_st_special_32u = 0xDE,
        midgard_op_st_special_32i = 0xDF,

        /* Only works on v5 or newer.
         * Older cards must use ld_special with tilebuffer selectors. */
        midgard_op_st_tilebuffer_32f = 0xE8,
        midgard_op_st_tilebuffer_16f = 0xE9,
        midgard_op_st_tilebuffer_raw = 0xEA,
        midgard_op_trap = 0xFC,
} midgard_load_store_op;
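
/* Illustrative only (hypothetical helper, not part of the ISA definition):
 * per the ld_ubo_* comment above, the instruction offset is expressed in
 * units of the access size, so an already-aligned byte offset is converted
 * by shifting. */
static inline unsigned
midgard_ref_ubo_offset_units(midgard_load_store_op op, unsigned byte_offset)
{
        switch (op) {
        case midgard_op_ld_ubo_128:
                return byte_offset >> 4; /* 16-byte units */
        case midgard_op_ld_ubo_64:
                return byte_offset >> 3; /* 8-byte units */
        case midgard_op_ld_ubo_32:
                return byte_offset >> 2; /* 4-byte units */
        default:
                return byte_offset; /* other opcodes not modelled here */
        }
}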

typedef enum {
        midgard_interp_sample = 0,
        midgard_interp_centroid = 1,
        midgard_interp_default = 2
} midgard_interpolation;

typedef enum {
        midgard_varying_mod_none = 0,

        /* Take the would-be result and divide all components by its y/z/w
         * (perspective division baked in with the load) */
        midgard_varying_mod_perspective_y = 1,
        midgard_varying_mod_perspective_z = 2,
        midgard_varying_mod_perspective_w = 3,

        /* The result is a 64-bit cubemap descriptor to use with
         * midgard_tex_op_normal or midgard_tex_op_gradient */
        midgard_varying_mod_cubemap = 4,
} midgard_varying_modifier;

typedef struct
__attribute__((__packed__))
{
        midgard_varying_modifier modifier : 3;

        bool flat_shading : 1;

        /* These are ignored if flat_shading is enabled. */
        bool perspective_correction : 1;
        bool centroid_mapping : 1;

        /* This is ignored if the shader only runs once per pixel. */
        bool interpolate_sample : 1;

        bool zero0 : 1; /* Always zero */

        unsigned direct_sample_pos_x : 4;
        unsigned direct_sample_pos_y : 4;
}
midgard_varying_params;

/* 8-bit register/etc selector for load/store ops */
typedef struct
__attribute__((__packed__))
{
        /* Indexes into the register */
        unsigned component : 2;

        /* Register select between r26/r27 */
        unsigned select : 1;

        unsigned unknown : 2;

        /* Like any good Arm instruction set, load/store arguments can be
         * implicitly left-shifted... but only the second argument. Zero for no
         * shifting, up to <<7 possible though. This is useful for indexing.
         *
         * For the first argument, it's unknown what these bits mean */
        unsigned shift : 3;
}
midgard_ldst_register_select;

typedef enum {
        /* 0 is reserved */
        midgard_index_address_u64 = 1,
        midgard_index_address_u32 = 2,
        midgard_index_address_s32 = 3,
} midgard_index_address_format;

typedef struct
__attribute__((__packed__))
{
        midgard_load_store_op op : 8;

        /* Source/dest reg */
        unsigned reg : 5;

        /* Generally is a writemask.
         * For ST_ATTR and ST_TEX, unused.
         * For other stores, each bit masks 1/4th of the output. */
        unsigned mask : 4;

        /* Swizzle for stores, but for atomics it also encodes the source
         * register. This fits because atomics don't need a swizzle since they
         * are not vectorized instructions. */
        unsigned swizzle : 8;

        /* Arg reg, meaning changes according to each opcode */
        unsigned arg_comp : 2;
        unsigned arg_reg : 3;

        /* 64-bit address enable.
         * 32-bit data type enable for CUBEMAP and perspective div.
         * Explicit indexing enable for LD_ATTR.
         * 64-bit coordinate enable for LD_IMAGE. */
        bool bitsize_toggle : 1;

        /* These are mainly used for opcodes that have addresses.
         * For cmpxchg, index_reg is used for the comparison value.
         * For ops that access the attrib table, bit 1 encodes which table.
         * For LD_VAR and LD/ST_ATTR, bit 0 enables dest/src type inferral. */
        midgard_index_address_format index_format : 2;
        unsigned index_comp : 2;
        unsigned index_reg : 3;
        unsigned index_shift : 4;

        /* Generally is a signed offset, but it has a different bitsize and
         * starts at different bits depending on the opcode; the
         * LDST_*_DISPLACEMENT helpers are recommended when packing/unpacking
         * this attribute.
         * For LD_UBO, bit 0 enables the ubo index immediate.
         * For LD_TILEBUFFER_RAW, bit 0 disables the sample index immediate. */
        int signed_offset : 18;
}
midgard_load_store_word;

typedef struct
__attribute__((__packed__))
{
        unsigned type : 4;
        unsigned next_type : 4;
        uint64_t word1 : 60;
        uint64_t word2 : 60;
}
midgard_load_store;

/* 8-bit register selector used in texture ops to select a bias/LOD/gradient
 * register, shoved into the `bias` field */

typedef struct
__attribute__((__packed__))
{
        /* 32-bit register, clear for half-register */
        unsigned full : 1;

        /* Register select between r28/r29 */
        unsigned select : 1;

        /* For a half-register, selects the upper half */
        unsigned upper : 1;

        /* Indexes into the register */
        unsigned component : 2;

        /* Padding to make this 8-bit */
        unsigned zero : 3;
}
midgard_tex_register_select;

/* Texture pipeline results are in r28-r29 */
#define REG_TEX_BASE 28

enum mali_texture_op {
        /* [texture + LOD bias]
         * If the texture is mipmapped, barriers must be enabled in the
         * instruction word in order for this opcode to compute the output
         * correctly. */
        midgard_tex_op_normal = 1,

        /* [texture + gradient for LOD and anisotropy]
         * Unlike midgard_tex_op_normal, this opcode does not require barriers
         * to compute the output correctly. */
        midgard_tex_op_gradient = 2,

        /* [unfiltered texturing]
         * Unlike midgard_tex_op_normal, this opcode does not require barriers
         * to compute the output correctly. */
        midgard_tex_op_fetch = 4,

        /* [gradient from derivative] */
        midgard_tex_op_grad_from_derivative = 9,

        /* [mov] */
        midgard_tex_op_mov = 10,

        /* [noop]
         * Mostly used for barriers. */
        midgard_tex_op_barrier = 11,

        /* [gradient from coords] */
        midgard_tex_op_grad_from_coords = 12,

        /* [derivative]
         * Computes derivatives in 2x2 fragment blocks. */
        midgard_tex_op_derivative = 13
};

enum mali_sampler_type {
        /* 0 is reserved */
        MALI_SAMPLER_FLOAT = 0x1, /* sampler */
        MALI_SAMPLER_UNSIGNED = 0x2, /* usampler */
        MALI_SAMPLER_SIGNED = 0x3, /* isampler */
};

/* Texture modes */
enum mali_texture_mode {
        TEXTURE_NORMAL = 1,
        TEXTURE_SHADOW = 5,
        TEXTURE_GATHER_SHADOW = 6,
        TEXTURE_GATHER_X = 8,
        TEXTURE_GATHER_Y = 9,
        TEXTURE_GATHER_Z = 10,
        TEXTURE_GATHER_W = 11,
};

enum mali_derivative_mode {
        TEXTURE_DFDX = 0,
        TEXTURE_DFDY = 1,
};

typedef struct
__attribute__((__packed__))
{
        unsigned type : 4;
        unsigned next_type : 4;

        enum mali_texture_op op : 4;
        unsigned mode : 4;

        /* A little obscure, but last is set for the last texture operation in
         * a shader. cont appears to just be last's opposite (?). Yeah, I know,
         * kind of funky.. BiOpen thinks it could do with memory hinting, or
         * tile locking? */

        unsigned cont : 1;
        unsigned last : 1;

        unsigned format : 2;

        /* Are sampler_handle/texture_handle respectively set by registers? If
         * true, the lower 8 bits of the respective field is a register word.
         * If false, they are an immediate */

        unsigned sampler_register : 1;
        unsigned texture_register : 1;

        /* Is a register used to specify the
         * LOD/bias/offset? If set, use the `bias` field as
         * a register index. If clear, use the `bias` field
         * as an immediate. */
        unsigned lod_register : 1;

        /* Is a register used to specify an offset? If set, use the
         * offset_reg_* fields to encode this, duplicated for each of the
         * components. If clear, there is implicitly always an immediate offset
         * specified in offset_imm_* */
        unsigned offset_register : 1;

        unsigned in_reg_full : 1;
        unsigned in_reg_select : 1;
        unsigned in_reg_upper : 1;
        unsigned in_reg_swizzle : 8;

        unsigned unknown8 : 2;

        unsigned out_full : 1;

        enum mali_sampler_type sampler_type : 2;

        unsigned out_reg_select : 1;
        unsigned out_upper : 1;

        unsigned mask : 4;

        /* Intriguingly, textures can take an outmod just like alu ops. Int
         * outmods are not supported as far as I can tell, so this is only
         * meaningful for float samplers */
        midgard_outmod_float outmod : 2;

        unsigned swizzle : 8;

        /* These indicate how many bundles after this texture op may be
         * executed in parallel with this op. We may execute only ALU and
         * ld/st in parallel (not other textures), and obviously there cannot
         * be any dependency (the blob appears to forbid even accessing other
         * channels of a given texture register). */

        unsigned out_of_order : 2;
        unsigned unknown4 : 10;

        /* In immediate mode, each offset field is an immediate in the range
         * [0, 7].
         *
         * In register mode, offset_x becomes a register (full, select, upper)
         * triplet, and a vec3 swizzle is splattered across offset_y/offset_z
         * in a genuinely bizarre way.
         *
         * For texel fetches in immediate mode, the range is the full [-8, 7],
         * but for normal texturing the top bit must be zero and a register
         * used instead. It's not clear where this limitation is from.
         *
         * union {
         *     struct {
         *         signed offset_x : 4;
         *         signed offset_y : 4;
         *         signed offset_z : 4;
         *     } immediate;
         *     struct {
         *         bool full : 1;
         *         bool select : 1;
         *         bool upper : 1;
         *         unsigned swizzle : 8;
         *         unsigned zero : 1;
         *     } register;
         * }
         */

        unsigned offset : 12;

        /* In immediate bias mode, for a normal texture op, this is the
         * texture bias, computed as int(2^8 * frac(biasf)), with
         * bias_int = floor(biasf). For a textureLod, it's that, but
         * s/bias/lod. For a texel fetch, this is the LOD as-is. (See the
         * encoding sketch after this struct.)
         *
         * In register mode, this is a midgard_tex_register_select
         * structure and bias_int is zero */

        unsigned bias : 8;
        signed bias_int : 8;

        /* If sampler/texture_register is set, the bottom 8 bits are a
         * midgard_tex_register_select and the top 8 bits are zero. If they
         * are clear, they are immediate texture indices */

        unsigned sampler_handle : 16;
        unsigned texture_handle : 16;
}
midgard_texture_word;
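
/* Illustrative only: a sketch of the immediate LOD/bias encoding described in
 * the bias/bias_int comment above (bias_int = floor(x), bias = frac(x)
 * scaled by 2^8). Hypothetical helper, not part of the ISA headers; floor is
 * open-coded to avoid pulling in <math.h>. */
static inline void
midgard_ref_pack_lod(float x, int8_t *bias_int, uint8_t *bias)
{
        int ip = (int) x;
        if ((float) ip > x)
                --ip; /* truncation rounds toward zero; adjust to floor() */

        *bias_int = (int8_t) ip;
        *bias = (uint8_t) (int) (256.0f * (x - (float) ip)); /* 2^8 * frac(x) */
}

/* e.g. x = -0.75 packs as bias_int = -1, bias = 64 (since 0.25 * 256 = 64). */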

/* Technically barriers are texture instructions but it's less work to add them
 * as an explicitly zeroed special case, since most fields are forced to go to
 * zero */

typedef struct
__attribute__((__packed__))
{
        unsigned type : 4;
        unsigned next_type : 4;

        /* op = TEXTURE_OP_BARRIER */
        unsigned op : 6;
        unsigned zero1 : 2;

        /* Since helper invocations don't make any sense, these are forced to one */
        unsigned cont : 1;
        unsigned last : 1;
        unsigned zero2 : 14;

        unsigned zero3 : 24;
        unsigned out_of_order : 4;
        unsigned zero4 : 4;

        uint64_t zero5;
} midgard_texture_barrier_word;

typedef union midgard_constants {
        double f64[2];
        uint64_t u64[2];
        int64_t i64[2];
        float f32[4];
        uint32_t u32[4];
        int32_t i32[4];
        uint16_t f16[8];
        uint16_t u16[8];
        int16_t i16[8];
        uint8_t u8[16];
        int8_t i8[16];
}
midgard_constants;

enum midgard_roundmode {
        MIDGARD_RTE = 0x0, /* round to even */
        MIDGARD_RTZ = 0x1, /* round to zero */
        MIDGARD_RTN = 0x2, /* round to negative */
        MIDGARD_RTP = 0x3, /* round to positive */
};

#endif