/* Path: blob/21.2-virgl/src/panfrost/midgard/midgard.h */
/* Author(s):
 *   Connor Abbott
 *   Alyssa Rosenzweig
 *
 * Copyright (c) 2013 Connor Abbott ([email protected])
 * Copyright (c) 2018 Alyssa Rosenzweig ([email protected])
 * Copyright (C) 2019-2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Midgard ISA encoding: debug flags, instruction word tags, ALU /
 * load-store / texture opcodes, and the packed bitfield layouts of
 * each instruction word class. */

#ifndef __midgard_h__
#define __midgard_h__

#include <stdint.h>
#include <stdbool.h>

#define MIDGARD_DBG_MSGS     0x0001
#define MIDGARD_DBG_SHADERS  0x0002
#define MIDGARD_DBG_SHADERDB 0x0004
#define MIDGARD_DBG_INORDER  0x0008
#define MIDGARD_DBG_VERBOSE  0x0010
#define MIDGARD_DBG_INTERNAL 0x0020

extern int midgard_debug;

typedef enum {
    midgard_word_type_alu,
    midgard_word_type_load_store,
    midgard_word_type_texture,
    midgard_word_type_unknown
} midgard_word_type;

typedef enum {
    midgard_alu_vmul,
    midgard_alu_sadd,
    midgard_alu_smul,
    midgard_alu_vadd,
    midgard_alu_lut
} midgard_alu;

enum {
    TAG_INVALID           = 0x0,
    TAG_BREAK             = 0x1,
    TAG_TEXTURE_4_VTX     = 0x2,
    TAG_TEXTURE_4         = 0x3,
    TAG_TEXTURE_4_BARRIER = 0x4,
    TAG_LOAD_STORE_4      = 0x5,
    TAG_UNKNOWN_1         = 0x6,
    TAG_UNKNOWN_2         = 0x7,
    TAG_ALU_4             = 0x8,
    TAG_ALU_8             = 0x9,
    TAG_ALU_12            = 0xA,
    TAG_ALU_16            = 0xB,
    TAG_ALU_4_WRITEOUT    = 0xC,
    TAG_ALU_8_WRITEOUT    = 0xD,
    TAG_ALU_12_WRITEOUT   = 0xE,
    TAG_ALU_16_WRITEOUT   = 0xF
};

/*
 * ALU words
 */

typedef enum {
    midgard_alu_op_fadd     = 0x10, /* round to even */
    midgard_alu_op_fadd_rtz = 0x11,
    midgard_alu_op_fadd_rtn = 0x12,
    midgard_alu_op_fadd_rtp = 0x13,
    midgard_alu_op_fmul     = 0x14, /* round to even */
    midgard_alu_op_fmul_rtz = 0x15,
    midgard_alu_op_fmul_rtn = 0x16,
    midgard_alu_op_fmul_rtp = 0x17,

    midgard_alu_op_fmin        = 0x28, /* if an operand is NaN, propagate the other */
    midgard_alu_op_fmin_nan    = 0x29, /* if an operand is NaN, propagate it */
    midgard_alu_op_fabsmin     = 0x2A, /* min(abs(a,b)) */
    midgard_alu_op_fabsmin_nan = 0x2B, /* min_nan(abs(a,b)) */
    midgard_alu_op_fmax        = 0x2C, /* if an operand is NaN, propagate the other */
    midgard_alu_op_fmax_nan    = 0x2D, /* if an operand is NaN, propagate it */
    midgard_alu_op_fabsmax     = 0x2E, /* max(abs(a,b)) */
    midgard_alu_op_fabsmax_nan = 0x2F, /* max_nan(abs(a,b)) */

    midgard_alu_op_fmov       = 0x30, /* fmov_rte */
    midgard_alu_op_fmov_rtz   = 0x31,
    midgard_alu_op_fmov_rtn   = 0x32,
    midgard_alu_op_fmov_rtp   = 0x33,
    midgard_alu_op_froundeven = 0x34,
    midgard_alu_op_ftrunc     = 0x35,
    midgard_alu_op_ffloor     = 0x36,
    midgard_alu_op_fceil      = 0x37,
    midgard_alu_op_ffma       = 0x38, /* rte */
    midgard_alu_op_ffma_rtz   = 0x39,
    midgard_alu_op_ffma_rtn   = 0x3A,
    midgard_alu_op_ffma_rtp   = 0x3B,
    midgard_alu_op_fdot3      = 0x3C,
    midgard_alu_op_fdot3r     = 0x3D,
    midgard_alu_op_fdot4      = 0x3E,
    midgard_alu_op_freduce    = 0x3F,

    midgard_alu_op_iadd    = 0x40,
    midgard_alu_op_ishladd = 0x41, /* (a<<1) + b */
    midgard_alu_op_isub    = 0x46,
    midgard_alu_op_ishlsub = 0x47, /* (a<<1) - b */
    midgard_alu_op_iaddsat = 0x48,
    midgard_alu_op_uaddsat = 0x49,
    midgard_alu_op_isubsat = 0x4E,
    midgard_alu_op_usubsat = 0x4F,

    midgard_alu_op_imul = 0x58,
    /* Multiplies two ints and stores the result in the next larger datasize. */
    midgard_alu_op_iwmul  = 0x59, /* sint * sint = sint */
    midgard_alu_op_uwmul  = 0x5A, /* uint * uint = uint */
    midgard_alu_op_iuwmul = 0x5B, /* sint * uint = sint */

    midgard_alu_op_imin    = 0x60,
    midgard_alu_op_umin    = 0x61,
    midgard_alu_op_imax    = 0x62,
    midgard_alu_op_umax    = 0x63,
    midgard_alu_op_iavg    = 0x64,
    midgard_alu_op_uavg    = 0x65,
    midgard_alu_op_iravg   = 0x66,
    midgard_alu_op_uravg   = 0x67,
    midgard_alu_op_iasr    = 0x68,
    midgard_alu_op_ilsr    = 0x69,
    midgard_alu_op_ishlsat = 0x6C,
    midgard_alu_op_ushlsat = 0x6D,
    midgard_alu_op_ishl    = 0x6E,

    midgard_alu_op_iand     = 0x70,
    midgard_alu_op_ior      = 0x71,
    midgard_alu_op_inand    = 0x72, /* ~(a & b), for inot let a = b */
    midgard_alu_op_inor     = 0x73, /* ~(a | b) */
    midgard_alu_op_iandnot  = 0x74, /* (a & ~b), used for not/b2f */
    midgard_alu_op_iornot   = 0x75, /* (a | ~b) */
    midgard_alu_op_ixor     = 0x76,
    midgard_alu_op_inxor    = 0x77, /* ~(a ^ b) */
    midgard_alu_op_iclz     = 0x78, /* Number of zeroes on left */
    midgard_alu_op_ipopcnt  = 0x7A, /* Population count */
    midgard_alu_op_imov     = 0x7B,
    midgard_alu_op_iabsdiff = 0x7C,
    midgard_alu_op_uabsdiff = 0x7D,
    midgard_alu_op_ichoose  = 0x7E, /* vector, component number - dupe for shuffle() */

    midgard_alu_op_feq       = 0x80,
    midgard_alu_op_fne       = 0x81,
    midgard_alu_op_flt       = 0x82,
    midgard_alu_op_fle       = 0x83,
    midgard_alu_op_fball_eq  = 0x88,
    midgard_alu_op_fball_neq = 0x89,
    midgard_alu_op_fball_lt  = 0x8A, /* all(lessThan(.., ..)) */
    midgard_alu_op_fball_lte = 0x8B, /* all(lessThanEqual(.., ..)) */

    midgard_alu_op_fbany_eq  = 0x90,
    midgard_alu_op_fbany_neq = 0x91,
    midgard_alu_op_fbany_lt  = 0x92, /* any(lessThan(.., ..)) */
    midgard_alu_op_fbany_lte = 0x93, /* any(lessThanEqual(.., ..)) */

    midgard_alu_op_f2i_rte = 0x98,
    midgard_alu_op_f2i_rtz = 0x99,
    midgard_alu_op_f2i_rtn = 0x9A,
    midgard_alu_op_f2i_rtp = 0x9B,
    midgard_alu_op_f2u_rte = 0x9C,
    midgard_alu_op_f2u_rtz = 0x9D,
    midgard_alu_op_f2u_rtn = 0x9E,
    midgard_alu_op_f2u_rtp = 0x9F,

    midgard_alu_op_ieq       = 0xA0,
    midgard_alu_op_ine       = 0xA1,
    midgard_alu_op_ult       = 0xA2,
    midgard_alu_op_ule       = 0xA3,
    midgard_alu_op_ilt       = 0xA4,
    midgard_alu_op_ile       = 0xA5,
    midgard_alu_op_iball_eq  = 0xA8,
    midgard_alu_op_iball_neq = 0xA9,
    midgard_alu_op_uball_lt  = 0xAA,
    midgard_alu_op_uball_lte = 0xAB,
    midgard_alu_op_iball_lt  = 0xAC,
    midgard_alu_op_iball_lte = 0xAD,

    midgard_alu_op_ibany_eq  = 0xB0,
    midgard_alu_op_ibany_neq = 0xB1,
    midgard_alu_op_ubany_lt  = 0xB2,
    midgard_alu_op_ubany_lte = 0xB3,
    midgard_alu_op_ibany_lt  = 0xB4, /* any(lessThan(.., ..)) */
    midgard_alu_op_ibany_lte = 0xB5, /* any(lessThanEqual(.., ..)) */
    midgard_alu_op_i2f_rte   = 0xB8,
    midgard_alu_op_i2f_rtz   = 0xB9,
    midgard_alu_op_i2f_rtn   = 0xBA,
    midgard_alu_op_i2f_rtp   = 0xBB,
    midgard_alu_op_u2f_rte   = 0xBC,
    midgard_alu_op_u2f_rtz   = 0xBD,
    midgard_alu_op_u2f_rtn   = 0xBE,
    midgard_alu_op_u2f_rtp   = 0xBF,

    /* All csel* instructions use as a condition the output of the previous
     * vector or scalar unit, thus it must run on the second pipeline stage
     * and be scheduled to the same bundle as the opcode that it uses as a
     * condition. */
    midgard_alu_op_icsel_v    = 0xC0,
    midgard_alu_op_icsel      = 0xC1,
    midgard_alu_op_fcsel_v    = 0xC4,
    midgard_alu_op_fcsel      = 0xC5,
    midgard_alu_op_froundaway = 0xC6, /* round to nearest away */

    midgard_alu_op_fatan2_pt2 = 0xE8,
    midgard_alu_op_fpow_pt1   = 0xEC,
    midgard_alu_op_fpown_pt1  = 0xED,
    midgard_alu_op_fpowr_pt1  = 0xEE,

    midgard_alu_op_frcp       = 0xF0,
    midgard_alu_op_frsqrt     = 0xF2,
    midgard_alu_op_fsqrt      = 0xF3,
    midgard_alu_op_fexp2      = 0xF4,
    midgard_alu_op_flog2      = 0xF5,
    midgard_alu_op_fsinpi     = 0xF6, /* sin(pi * x) */
    midgard_alu_op_fcospi     = 0xF7, /* cos(pi * x) */
    midgard_alu_op_fatan2_pt1 = 0xF9,
} midgard_alu_op;

typedef enum {
    midgard_outmod_none        = 0,
    midgard_outmod_clamp_0_inf = 1, /* max(x, 0.0), NaNs become +0.0 */
    midgard_outmod_clamp_m1_1  = 2, /* clamp(x, -1.0, 1.0), NaNs become -1.0 */
    midgard_outmod_clamp_0_1   = 3  /* clamp(x, 0.0, 1.0), NaNs become +0.0 */
} midgard_outmod_float;

/* These are applied to the resulting value that's going to be stored in the dest reg.
 * This should be set to midgard_outmod_keeplo when shrink_mode is midgard_shrink_mode_none. */
typedef enum {
    midgard_outmod_ssat   = 0,
    midgard_outmod_usat   = 1,
    midgard_outmod_keeplo = 2, /* Keep low half */
    midgard_outmod_keephi = 3, /* Keep high half */
} midgard_outmod_int;

typedef enum {
    midgard_reg_mode_8  = 0,
    midgard_reg_mode_16 = 1,
    midgard_reg_mode_32 = 2,
    midgard_reg_mode_64 = 3
} midgard_reg_mode;

typedef enum {
    midgard_shrink_mode_lower = 0,
    midgard_shrink_mode_upper = 1,
    midgard_shrink_mode_none  = 2
} midgard_shrink_mode;

/* Only used if midgard_src_expand_mode is set to one of midgard_src_expand_*. */
typedef enum {
    midgard_int_sign_extend = 0,
    midgard_int_zero_extend = 1,
    midgard_int_replicate   = 2,
    midgard_int_left_shift  = 3
} midgard_int_mod;

/* Unlike midgard_int_mod, float modifiers are applied after the expansion happens, so
 * they don't depend on midgard_src_expand_mode. */
#define MIDGARD_FLOAT_MOD_ABS (1 << 0)
#define MIDGARD_FLOAT_MOD_NEG (1 << 1)

/* The expand options depend on both midgard_int_mod and midgard_reg_mode. For
 * example, a vec4 with midgard_int_sign_extend and midgard_src_expand_low is
 * treated as a vec8 and each 16-bit element from the low 64-bits is then sign
 * extended, resulting in a vec4 where each 32-bit element corresponds to a
 * 16-bit element from the low 64-bits of the input vector. */
typedef enum {
    midgard_src_passthrough      = 0,
    midgard_src_rep_low          = 1, /* replicate lower 64 bits to higher 64 bits */
    midgard_src_rep_high         = 2, /* replicate higher 64 bits to lower 64 bits */
    midgard_src_swap             = 3, /* swap lower 64 bits with higher 64 bits */
    midgard_src_expand_low       = 4, /* expand low 64 bits */
    midgard_src_expand_high      = 5, /* expand high 64 bits */
    midgard_src_expand_low_swap  = 6, /* expand low 64 bits, then swap */
    midgard_src_expand_high_swap = 7, /* expand high 64 bits, then swap */
} midgard_src_expand_mode;

#define INPUT_EXPANDS(a) \
    (a >= midgard_src_expand_low && a <= midgard_src_expand_high_swap)

#define INPUT_SWAPS(a) \
    (a == midgard_src_swap || a >= midgard_src_expand_low_swap)

typedef struct
__attribute__((__packed__))
{
    /* Either midgard_int_mod or from midgard_float_mod_*, depending on the
     * type of op */
    unsigned mod : 2;
    midgard_src_expand_mode expand_mode : 3;
    unsigned swizzle : 8;
}
midgard_vector_alu_src;

typedef struct
__attribute__((__packed__))
{
    midgard_alu_op op : 8;
    midgard_reg_mode reg_mode : 2;
    unsigned src1 : 13;
    unsigned src2 : 13;
    midgard_shrink_mode shrink_mode : 2;
    unsigned outmod : 2;
    unsigned mask : 8;
}
midgard_vector_alu;

typedef struct
__attribute__((__packed__))
{
    unsigned mod : 2;
    bool full : 1; /* 0 = 16-bit, 1 = 32-bit */
    unsigned component : 3;
}
midgard_scalar_alu_src;

typedef struct
__attribute__((__packed__))
{
    midgard_alu_op op : 8;
    unsigned src1 : 6;
    /* last 5 bits are used when src2 is an immediate */
    unsigned src2 : 11;
    unsigned unknown : 1;
    unsigned outmod : 2;
    bool output_full : 1;
    unsigned output_component : 3;
}
midgard_scalar_alu;

typedef struct
__attribute__((__packed__))
{
    unsigned src1_reg : 5;
    unsigned src2_reg : 5;
    unsigned out_reg : 5;
    bool src2_imm : 1;
}
midgard_reg_info;

/* In addition to conditional branches and jumps (unconditional branches),
 * Midgard implements a bit of fixed function functionality used in fragment
 * shaders via specially crafted branches. These have special branch opcodes,
 * which perform a fixed-function operation and/or use the results of a
 * fixed-function operation as the branch condition. */

typedef enum {
    /* Regular branches */
    midgard_jmp_writeout_op_branch_uncond = 1,
    midgard_jmp_writeout_op_branch_cond   = 2,

    /* In a fragment shader, execute a discard_if instruction, with the
     * corresponding condition code. Terminates the shader, so generally
     * set the branch target to out of the shader */
    midgard_jmp_writeout_op_discard = 4,

    /* Branch if the tilebuffer is not yet ready. At the beginning of a
     * fragment shader that reads from the tile buffer, for instance via
     * ARM_shader_framebuffer_fetch or EXT_pixel_local_storage, this branch
     * operation should be used as a loop. An instruction like
     * "br.tilebuffer.always -1" does the trick, corresponding to
     * "while(!is_tilebuffer_ready) */
    midgard_jmp_writeout_op_tilebuffer_pending = 6,

    /* In a fragment shader, try to write out the value pushed to r0 to the
     * tilebuffer, subject to unknown state in r1.z and r1.w. If this
     * succeeds, the shader terminates. If it fails, it branches to the
     * specified branch target. Generally, this should be used in a loop to
     * itself, acting as "do { write(r0); } while(!write_successful);" */
    midgard_jmp_writeout_op_writeout = 7,
} midgard_jmp_writeout_op;

typedef enum {
    midgard_condition_write0 = 0,

    /* These condition codes denote a conditional branch on FALSE and on
     * TRUE respectively */
    midgard_condition_false = 1,
    midgard_condition_true  = 2,

    /* This condition code always branches. For a pure branch, the
     * unconditional branch coding should be used instead, but for
     * fixed-function branch opcodes, this is still useful */
    midgard_condition_always = 3,
} midgard_condition;

typedef struct
__attribute__((__packed__))
{
    midgard_jmp_writeout_op op : 3; /* == branch_uncond */
    unsigned dest_tag : 4; /* tag of branch destination */
    unsigned unknown : 2;
    int offset : 7;
}
midgard_branch_uncond;

typedef struct
__attribute__((__packed__))
{
    midgard_jmp_writeout_op op : 3; /* == branch_cond */
    unsigned dest_tag : 4; /* tag of branch destination */
    int offset : 7;
    midgard_condition cond : 2;
}
midgard_branch_cond;

typedef struct
__attribute__((__packed__))
{
    midgard_jmp_writeout_op op : 3; /* == branch_cond */
    unsigned dest_tag : 4; /* tag of branch destination */
    unsigned unknown : 2;
    signed offset : 23;

    /* Extended branches permit inputting up to 4 conditions loaded into
     * r31 (two in r31.w and two in r31.x). In the most general case, we
     * specify a function f(A, B, C, D) mapping 4 1-bit conditions to a
     * single 1-bit branch criteria. Note that the domain of f has 2^(2^4)
     * elements, each mapping to 1-bit of output, so we can trivially
     * construct a Godel numbering of f as a (2^4)=16-bit integer. This
     * 16-bit integer serves as a lookup table to compute f, subject to
     * some swaps for ordering.
     *
     * Interestingly, the standard 2-bit condition codes are also a LUT with
     * the same format (2^1-bit), but it's usually easier to use enums. */

    unsigned cond : 16;
}
midgard_branch_extended;

typedef struct
__attribute__((__packed__))
{
    midgard_jmp_writeout_op op : 3; /* == writeout */
    unsigned unknown : 13;
}
midgard_writeout;

/*
 * Load/store words
 */

typedef enum {
    midgard_op_ld_st_noop = 0x03,

    /* Unpacks a colour from a native format to <format> */
    midgard_op_unpack_colour_f32 = 0x04,
    midgard_op_unpack_colour_f16 = 0x05,
    midgard_op_unpack_colour_u32 = 0x06,
    midgard_op_unpack_colour_s32 = 0x07,

    /* Packs a colour from <format> to a native format */
    midgard_op_pack_colour_f32 = 0x08,
    midgard_op_pack_colour_f16 = 0x09,
    midgard_op_pack_colour_u32 = 0x0A,
    midgard_op_pack_colour_s32 = 0x0B,

    /* Computes the effective address of a mem address expression */
    midgard_op_lea = 0x0C,

    /* Converts image coordinates into mem address */
    midgard_op_lea_image = 0x0D,

    /* Unclear why this is on the L/S unit, but moves fp32 cube map
     * coordinates in r27 to its cube map texture coordinate destination
     * (e.g r29). */

    midgard_op_ld_cubemap_coords = 0x0E,

    /* A mov between registers that the ldst pipeline can access */
    midgard_op_ldst_mov = 0x10,

    /* The L/S unit can do perspective division a clock faster than the ALU
     * if you're lucky. Put the vec4 in r27, and call with 0x24 as the
     * unknown state; the output will be <x/w, y/w, z/w, 1>. Replace w with
     * z for the z version */
    midgard_op_ldst_perspective_div_y = 0x11,
    midgard_op_ldst_perspective_div_z = 0x12,
    midgard_op_ldst_perspective_div_w = 0x13,

    /* val in r27.y, address embedded, outputs result to argument. Invert val for sub. Let val = +-1 for inc/dec. */
    midgard_op_atomic_add      = 0x40,
    midgard_op_atomic_add64    = 0x41,
    midgard_op_atomic_add_be   = 0x42,
    midgard_op_atomic_add64_be = 0x43,

    midgard_op_atomic_and      = 0x44,
    midgard_op_atomic_and64    = 0x45,
    midgard_op_atomic_and_be   = 0x46,
    midgard_op_atomic_and64_be = 0x47,
    midgard_op_atomic_or       = 0x48,
    midgard_op_atomic_or64     = 0x49,
    midgard_op_atomic_or_be    = 0x4A,
    midgard_op_atomic_or64_be  = 0x4B,
    midgard_op_atomic_xor      = 0x4C,
    midgard_op_atomic_xor64    = 0x4D,
    midgard_op_atomic_xor_be   = 0x4E,
    midgard_op_atomic_xor64_be = 0x4F,

    midgard_op_atomic_imin      = 0x50,
    midgard_op_atomic_imin64    = 0x51,
    midgard_op_atomic_imin_be   = 0x52,
    midgard_op_atomic_imin64_be = 0x53,
    midgard_op_atomic_umin      = 0x54,
    midgard_op_atomic_umin64    = 0x55,
    midgard_op_atomic_umin_be   = 0x56,
    midgard_op_atomic_umin64_be = 0x57,
    midgard_op_atomic_imax      = 0x58,
    midgard_op_atomic_imax64    = 0x59,
    midgard_op_atomic_imax_be   = 0x5A,
    midgard_op_atomic_imax64_be = 0x5B,
    midgard_op_atomic_umax      = 0x5C,
    midgard_op_atomic_umax64    = 0x5D,
    midgard_op_atomic_umax_be   = 0x5E,
    midgard_op_atomic_umax64_be = 0x5F,

    midgard_op_atomic_xchg      = 0x60,
    midgard_op_atomic_xchg64    = 0x61,
    midgard_op_atomic_xchg_be   = 0x62,
    midgard_op_atomic_xchg64_be = 0x63,

    midgard_op_atomic_cmpxchg      = 0x64,
    midgard_op_atomic_cmpxchg64    = 0x65,
    midgard_op_atomic_cmpxchg_be   = 0x66,
    midgard_op_atomic_cmpxchg64_be = 0x67,

    /* Used for compute shader's __global arguments, __local
     * variables (or for register spilling) */

    midgard_op_ld_u8         = 0x80, /* zero extends */
    midgard_op_ld_i8         = 0x81, /* sign extends */
    midgard_op_ld_u16        = 0x84, /* zero extends */
    midgard_op_ld_i16        = 0x85, /* sign extends */
    midgard_op_ld_u16_be     = 0x86, /* zero extends, big endian */
    midgard_op_ld_i16_be     = 0x87, /* sign extends, big endian */
    midgard_op_ld_32         = 0x88, /* short2, int, float */
    midgard_op_ld_32_bswap2  = 0x89, /* 16-bit big endian vector */
    midgard_op_ld_32_bswap4  = 0x8A, /* 32-bit big endian scalar */
    midgard_op_ld_64         = 0x8C, /* int2, float2, long */
    midgard_op_ld_64_bswap2  = 0x8D, /* 16-bit big endian vector */
    midgard_op_ld_64_bswap4  = 0x8E, /* 32-bit big endian vector */
    midgard_op_ld_64_bswap8  = 0x8F, /* 64-bit big endian scalar */
    midgard_op_ld_128        = 0x90, /* float4, long2 */
    midgard_op_ld_128_bswap2 = 0x91, /* 16-bit big endian vector */
    midgard_op_ld_128_bswap4 = 0x92, /* 32-bit big endian vector */
    midgard_op_ld_128_bswap8 = 0x93, /* 64-bit big endian vector */

    midgard_op_ld_attr_32  = 0x94,
    midgard_op_ld_attr_16  = 0x95,
    midgard_op_ld_attr_32u = 0x96,
    midgard_op_ld_attr_32i = 0x97,
    midgard_op_ld_vary_32  = 0x98,
    midgard_op_ld_vary_16  = 0x99,
    midgard_op_ld_vary_32u = 0x9A,
    midgard_op_ld_vary_32i = 0x9B,

    /* This instruction behaves differently depending if the gpu is a v4
     * or a newer gpu. The main difference hinges on which values of the
     * second argument are valid for each gpu.
     * TODO: properly document and decode each possible value for the
     * second argument. */
    midgard_op_ld_special_32f = 0x9C,
    midgard_op_ld_special_16f = 0x9D,
    midgard_op_ld_special_32u = 0x9E,
    midgard_op_ld_special_32i = 0x9F,

    /* The distinction between these ops is the alignment
     * requirement / accompanying shift. Thus, the offset to
     * ld_ubo_128 is in 16-byte units and can load 128-bit. The
     * offset to ld_ubo_64 is in 8-byte units; ld_ubo_32 in 4-byte
     * units. */
    midgard_op_ld_ubo_u8         = 0xA0, /* theoretical */
    midgard_op_ld_ubo_i8         = 0xA1, /* theoretical */
    midgard_op_ld_ubo_u16        = 0xA4, /* theoretical */
    midgard_op_ld_ubo_i16        = 0xA5, /* theoretical */
    midgard_op_ld_ubo_u16_be     = 0xA6, /* theoretical */
    midgard_op_ld_ubo_i16_be     = 0xA7, /* theoretical */
    midgard_op_ld_ubo_32         = 0xA8,
    midgard_op_ld_ubo_32_bswap2  = 0xA9,
    midgard_op_ld_ubo_32_bswap4  = 0xAA,
    midgard_op_ld_ubo_64         = 0xAC,
    midgard_op_ld_ubo_64_bswap2  = 0xAD,
    midgard_op_ld_ubo_64_bswap4  = 0xAE,
    midgard_op_ld_ubo_64_bswap8  = 0xAF,
    midgard_op_ld_ubo_128        = 0xB0,
    midgard_op_ld_ubo_128_bswap2 = 0xB1,
    midgard_op_ld_ubo_128_bswap4 = 0xB2,
    midgard_op_ld_ubo_128_bswap8 = 0xB3,

    midgard_op_ld_image_32f = 0xB4,
    midgard_op_ld_image_16f = 0xB5,
    midgard_op_ld_image_32u = 0xB6,
    midgard_op_ld_image_32i = 0xB7,

    /* Only works on v5 or newer.
     * Older cards must use ld_special with tilebuffer selectors. */
    midgard_op_ld_tilebuffer_32f = 0xB8,
    midgard_op_ld_tilebuffer_16f = 0xB9,
    midgard_op_ld_tilebuffer_raw = 0xBA,

    midgard_op_st_u8         = 0xC0, /* zero extends */
    midgard_op_st_i8         = 0xC1, /* sign extends */
    midgard_op_st_u16        = 0xC4, /* zero extends */
    midgard_op_st_i16        = 0xC5, /* sign extends */
    midgard_op_st_u16_be     = 0xC6, /* zero extends, big endian */
    midgard_op_st_i16_be     = 0xC7, /* sign extends, big endian */
    midgard_op_st_32         = 0xC8, /* short2, int, float */
    midgard_op_st_32_bswap2  = 0xC9, /* 16-bit big endian vector */
    midgard_op_st_32_bswap4  = 0xCA, /* 32-bit big endian scalar */
    midgard_op_st_64         = 0xCC, /* int2, float2, long */
    midgard_op_st_64_bswap2  = 0xCD, /* 16-bit big endian vector */
    midgard_op_st_64_bswap4  = 0xCE, /* 32-bit big endian vector */
    midgard_op_st_64_bswap8  = 0xCF, /* 64-bit big endian scalar */
    midgard_op_st_128        = 0xD0, /* float4, long2 */
    midgard_op_st_128_bswap2 = 0xD1, /* 16-bit big endian vector */
    midgard_op_st_128_bswap4 = 0xD2, /* 32-bit big endian vector */
    midgard_op_st_128_bswap8 = 0xD3, /* 64-bit big endian vector */

    midgard_op_st_vary_32  = 0xD4,
    midgard_op_st_vary_16  = 0xD5,
    midgard_op_st_vary_32u = 0xD6,
    midgard_op_st_vary_32i = 0xD7,

    /* Value to st in r27, location r26.w as short2 */
    midgard_op_st_image_32f = 0xD8,
    midgard_op_st_image_16f = 0xD9,
    midgard_op_st_image_32u = 0xDA,
    midgard_op_st_image_32i = 0xDB,

    midgard_op_st_special_32f = 0xDC,
    midgard_op_st_special_16f = 0xDD,
    midgard_op_st_special_32u = 0xDE,
    midgard_op_st_special_32i = 0xDF,

    /* Only works on v5 or newer.
     * Older cards must use ld_special with tilebuffer selectors. */
    midgard_op_st_tilebuffer_32f = 0xE8,
    midgard_op_st_tilebuffer_16f = 0xE9,
    midgard_op_st_tilebuffer_raw = 0xEA,
    midgard_op_trap              = 0xFC,
} midgard_load_store_op;

typedef enum {
    midgard_interp_sample   = 0,
    midgard_interp_centroid = 1,
    midgard_interp_default  = 2
} midgard_interpolation;

typedef enum {
    midgard_varying_mod_none = 0,

    /* Take the would-be result and divide all components by its y/z/w
     * (perspective division baked in with the load) */
    midgard_varying_mod_perspective_y = 1,
    midgard_varying_mod_perspective_z = 2,
    midgard_varying_mod_perspective_w = 3,

    /* The result is a 64-bit cubemap descriptor to use with
     * midgard_tex_op_normal or midgard_tex_op_gradient */
    midgard_varying_mod_cubemap = 4,
} midgard_varying_modifier;

typedef struct
__attribute__((__packed__))
{
    midgard_varying_modifier modifier : 3;

    bool flat_shading : 1;

    /* These are ignored if flat_shading is enabled. */
    bool perspective_correction : 1;
    bool centroid_mapping : 1;

    /* This is ignored if the shader only runs once per pixel. */
    bool interpolate_sample : 1;

    bool zero0 : 1; /* Always zero */

    unsigned direct_sample_pos_x : 4;
    unsigned direct_sample_pos_y : 4;
}
midgard_varying_params;

/* 8-bit register/etc selector for load/store ops */
typedef struct
__attribute__((__packed__))
{
    /* Indexes into the register */
    unsigned component : 2;

    /* Register select between r26/r27 */
    unsigned select : 1;

    unsigned unknown : 2;

    /* Like any good Arm instruction set, load/store arguments can be
     * implicitly left-shifted... but only the second argument. Zero for no
     * shifting, up to <<7 possible though. This is useful for indexing.
     *
     * For the first argument, it's unknown what these bits mean */
    unsigned shift : 3;
}
midgard_ldst_register_select;

typedef enum {
    /* 0 is reserved */
    midgard_index_address_u64 = 1,
    midgard_index_address_u32 = 2,
    midgard_index_address_s32 = 3,
} midgard_index_address_format;

typedef struct
__attribute__((__packed__))
{
    midgard_load_store_op op : 8;

    /* Source/dest reg */
    unsigned reg : 5;

    /* Generally is a writemask.
     * For ST_ATTR and ST_TEX, unused.
     * For other stores, each bit masks 1/4th of the output. */
    unsigned mask : 4;

    /* Swizzle for stores, but for atomics it encodes also the source
     * register. This fits because atomics dont need a swizzle since they
     * are not vectorized instructions. */
    unsigned swizzle : 8;

    /* Arg reg, meaning changes according to each opcode */
    unsigned arg_comp : 2;
    unsigned arg_reg : 3;

    /* 64-bit address enable
     * 32-bit data type enable for CUBEMAP and perspective div.
     * Explicit indexing enable for LD_ATTR.
     * 64-bit coordinate enable for LD_IMAGE. */
    bool bitsize_toggle : 1;

    /* These are mainly used for opcodes that have addresses.
     * For cmpxchg, index_reg is used for the comparison value.
     * For ops that access the attrib table, bit 1 encodes which table.
     * For LD_VAR and LD/ST_ATTR, bit 0 enables dest/src type inferral. */
    midgard_index_address_format index_format : 2;
    unsigned index_comp : 2;
    unsigned index_reg : 3;
    unsigned index_shift : 4;

    /* Generally is a signed offset, but has different bitsize and starts at
     * different bits depending on the opcode, LDST_*_DISPLACEMENT helpers
     * are recommended when packing/unpacking this attribute.
     * For LD_UBO, bit 0 enables ubo index immediate.
     * For LD_TILEBUFFER_RAW, bit 0 disables sample index immediate. */
    int signed_offset : 18;
}
midgard_load_store_word;

typedef struct
__attribute__((__packed__))
{
    unsigned type : 4;
    unsigned next_type : 4;
    uint64_t word1 : 60;
    uint64_t word2 : 60;
}
midgard_load_store;

/* 8-bit register selector used in texture ops to select a bias/LOD/gradient
 * register, shoved into the `bias` field */

typedef struct
__attribute__((__packed__))
{
    /* 32-bit register, clear for half-register */
    unsigned full : 1;

    /* Register select between r28/r29 */
    unsigned select : 1;

    /* For a half-register, selects the upper half */
    unsigned upper : 1;

    /* Indexes into the register */
    unsigned component : 2;

    /* Padding to make this 8-bit */
    unsigned zero : 3;
}
midgard_tex_register_select;

/* Texture pipeline results are in r28-r29 */
#define REG_TEX_BASE 28

enum mali_texture_op {
    /* [texture + LOD bias]
     * If the texture is mipmapped, barriers must be enabled in the
     * instruction word in order for this opcode to compute the output
     * correctly. */
    midgard_tex_op_normal = 1,

    /* [texture + gradient for LOD and anisotropy]
     * Unlike midgard_tex_op_normal, this opcode does not require barriers
     * to compute the output correctly. */
    midgard_tex_op_gradient = 2,

    /* [unfiltered texturing]
     * Unlike midgard_tex_op_normal, this opcode does not require barriers
     * to compute the output correctly. */
    midgard_tex_op_fetch = 4,

    /* [gradient from derivative] */
    midgard_tex_op_grad_from_derivative = 9,

    /* [mov] */
    midgard_tex_op_mov = 10,

    /* [noop]
     * Mostly used for barriers. */
    midgard_tex_op_barrier = 11,

    /* [gradient from coords] */
    midgard_tex_op_grad_from_coords = 12,

    /* [derivative]
     * Computes derivatives in 2x2 fragment blocks. */
    midgard_tex_op_derivative = 13
};

enum mali_sampler_type {
    /* 0 is reserved */
    MALI_SAMPLER_FLOAT    = 0x1, /* sampler */
    MALI_SAMPLER_UNSIGNED = 0x2, /* usampler */
    MALI_SAMPLER_SIGNED   = 0x3, /* isampler */
};

/* Texture modes */
enum mali_texture_mode {
    TEXTURE_NORMAL        = 1,
    TEXTURE_SHADOW        = 5,
    TEXTURE_GATHER_SHADOW = 6,
    TEXTURE_GATHER_X      = 8,
    TEXTURE_GATHER_Y      = 9,
    TEXTURE_GATHER_Z      = 10,
    TEXTURE_GATHER_W      = 11,
};

enum mali_derivative_mode {
    TEXTURE_DFDX = 0,
    TEXTURE_DFDY = 1,
};

typedef struct
__attribute__((__packed__))
{
    unsigned type : 4;
    unsigned next_type : 4;

    enum mali_texture_op op : 4;
    unsigned mode : 4;

    /* A little obscure, but last is set for the last texture operation in
     * a shader. cont appears to just be last's opposite (?). Yeah, I know,
     * kind of funky.. BiOpen thinks it could do with memory hinting, or
     * tile locking? */

    unsigned cont : 1;
    unsigned last : 1;

    unsigned format : 2;

    /* Are sampler_handle/texture_handler respectively set by registers? If
     * true, the lower 8-bits of the respective field is a register word.
     * If false, they are an immediate */

    unsigned sampler_register : 1;
    unsigned texture_register : 1;

    /* Is a register used to specify the
     * LOD/bias/offset? If set, use the `bias` field as
     * a register index. If clear, use the `bias` field
     * as an immediate. */
    unsigned lod_register : 1;

    /* Is a register used to specify an offset? If set, use the
     * offset_reg_* fields to encode this, duplicated for each of the
     * components. If clear, there is implicitly always an immediate offset
     * specified in offset_imm_* */
    unsigned offset_register : 1;

    unsigned in_reg_full : 1;
    unsigned in_reg_select : 1;
    unsigned in_reg_upper : 1;
    unsigned in_reg_swizzle : 8;

    unsigned unknown8 : 2;

    unsigned out_full : 1;

    enum mali_sampler_type sampler_type : 2;

    unsigned out_reg_select : 1;
    unsigned out_upper : 1;

    unsigned mask : 4;

    /* Intriguingly, textures can take an outmod just like alu ops. Int
     * outmods are not supported as far as I can tell, so this is only
     * meaningful for float samplers */
    midgard_outmod_float outmod : 2;

    unsigned swizzle : 8;

    /* These indicate how many bundles after this texture op may be
     * executed in parallel with this op. We may execute only ALU and
     * ld/st in parallel (not other textures), and obviously there cannot
     * be any dependency (the blob appears to forbid even accessing other
     * channels of a given texture register). */

    unsigned out_of_order : 2;
    unsigned unknown4 : 10;

    /* In immediate mode, each offset field is an immediate range [0, 7].
     *
     * In register mode, offset_x becomes a register (full, select, upper)
     * triplet followed by a vec3 swizzle is splattered across
     * offset_y/offset_z in a genuinely bizarre way.
     *
     * For texel fetches in immediate mode, the range is the full [-8, 7],
     * but for normal texturing the top bit must be zero and a register
     * used instead. It's not clear where this limitation is from.
     *
     * union {
     *      struct {
     *              signed offset_x : 4;
     *              signed offset_y : 4;
     *              signed offset_z : 4;
     *      } immediate;
     *      struct {
     *              bool full : 1;
     *              bool select : 1;
     *              bool upper : 1;
     *              unsigned swizzle : 8;
     *              unsigned zero : 1;
     *      } register;
     * }
     */

    unsigned offset : 12;

    /* In immediate bias mode, for a normal texture op, this is
     * texture bias, computed as int(2^8 * frac(biasf)), with
     * bias_int = floor(bias). For a textureLod, it's that, but
     * s/bias/lod. For a texel fetch, this is the LOD as-is.
     *
     * In register mode, this is a midgard_tex_register_select
     * structure and bias_int is zero */

    unsigned bias : 8;
    signed bias_int : 8;

    /* If sampler/texture_register is set, the bottom 8-bits are
     * midgard_tex_register_select and the top 8-bits are zero. If they are
     * clear, they are immediate texture indices */

    unsigned sampler_handle : 16;
    unsigned texture_handle : 16;
}
midgard_texture_word;

/* Technically barriers are texture instructions but it's less work to add them
 * as an explicitly zeroed special case, since most fields are forced to go to
 * zero */

typedef struct
__attribute__((__packed__))
{
    unsigned type : 4;
    unsigned next_type : 4;

    /* op = TEXTURE_OP_BARRIER */
    unsigned op : 6;
    unsigned zero1 : 2;

    /* Since helper invocations don't make any sense, these are forced to one */
    unsigned cont : 1;
    unsigned last : 1;
    unsigned zero2 : 14;

    unsigned zero3 : 24;
    unsigned out_of_order : 4;
    unsigned zero4 : 4;

    uint64_t zero5;
} midgard_texture_barrier_word;

typedef union midgard_constants {
    double f64[2];
    uint64_t u64[2];
    int64_t i64[2];
    float f32[4];
    uint32_t u32[4];
    int32_t i32[4];
    uint16_t f16[8];
    uint16_t u16[8];
    int16_t i16[8];
    uint8_t u8[16];
    int8_t i8[16];
}
midgard_constants;

enum midgard_roundmode {
    MIDGARD_RTE = 0x0, /* round to even */
    MIDGARD_RTZ = 0x1, /* round to zero */
    MIDGARD_RTN = 0x2, /* round to negative */
    MIDGARD_RTP = 0x3, /* round to positive */
};

#endif