GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/freedreno/ir3/ir3_context.h
/*
 * Copyright (C) 2015-2018 Rob Clark <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#ifndef IR3_CONTEXT_H_
#define IR3_CONTEXT_H_

#include "ir3.h"
#include "ir3_compiler.h"
#include "ir3_nir.h"

/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
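/* A minimal usage sketch (the flag name here is illustrative, not defined
 * in this header):
 *
 *    instr->flags |= COND(needs_sync, IR3_INSTR_SY);
 *
 * i.e. OR in the flag only when the condition holds, else OR in 0.
 */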

#define DBG(fmt, ...)                                                          \
   do {                                                                        \
      mesa_logd("%s:%d: " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__);         \
   } while (0)
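/* Usage sketch (hypothetical message): expands to a mesa_logd() call
 * prefixed with the enclosing function name and line number:
 *
 *    DBG("max_stack=%u", ctx->max_stack);
 */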

/**
 * The context for compilation of a single shader.
 */
struct ir3_context {
   struct ir3_compiler *compiler;
   const struct ir3_context_funcs *funcs;

   struct nir_shader *s;

   struct nir_instr *cur_instr; /* current instruction, just for debug */

   struct ir3 *ir;
   struct ir3_shader_variant *so;

   /* Tables of scalar inputs/outputs. Because of the way varying packing
    * works, we could have inputs with a fractional location, which is a bit
    * awkward to deal with unless we keep track of the split scalar in/
    * out components.
    *
    * These *only* have inputs/outputs that are touched by load_*input and
    * store_output.
    */
   unsigned ninputs, noutputs;
   struct ir3_instruction **inputs;
   struct ir3_instruction **outputs;

   struct ir3_block *block;    /* the current block */
   struct ir3_block *in_block; /* block created for shader inputs */

   nir_function_impl *impl;

   /* For fragment shaders, varyings are not actual shader inputs;
    * instead the hw passes an ij coord which is used with
    * bary.f.
    *
    * But NIR doesn't know that; it still declares varyings as
    * inputs. So we do all the input tracking normally and fix
    * things up after compile_instructions().
    */
   struct ir3_instruction *ij[IJ_COUNT];
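   /* Conceptually (a sketch, not the exact emitted sequence), a varying
    * load in a fragment shader is lowered to an interpolation at the ij
    * barycentric coordinate, roughly:
    *
    *    bary.f dst, inloc, ij   ; interpolate varying #inloc at ij
    */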

   /* for fragment shaders, for gl_FrontFacing and gl_FragCoord: */
   struct ir3_instruction *frag_face, *frag_coord;

   /* For vertex shaders, keep track of the system value sources: */
   struct ir3_instruction *vertex_id, *basevertex, *instance_id, *base_instance,
      *draw_id, *view_index;

   /* For fragment shaders: */
   struct ir3_instruction *samp_id, *samp_mask_in;

   /* For geometry shaders: */
   struct ir3_instruction *primitive_id;
   struct ir3_instruction *gs_header;

   /* For tessellation shaders: */
   struct ir3_instruction *patch_vertices_in;
   struct ir3_instruction *tcs_header;
   struct ir3_instruction *tess_coord;

   /* Compute shader inputs: */
   struct ir3_instruction *local_invocation_id, *work_group_id;

   /* mapping from nir_register to defining instruction: */
   struct hash_table *def_ht;

   unsigned num_arrays;

   /* Tracking for the max level of flow control (branchstack) needed
    * by a5xx+:
    */
   unsigned stack, max_stack;

   unsigned loop_id;

   /* A common pattern for indirect addressing is to request the
    * same address register multiple times. To avoid generating
    * duplicate instruction sequences (which our backend does not
    * try to clean up, since that should be done at the NIR stage)
    * we cache the address value generated for a given src value:
    *
    * Note that we have to cache these per alignment, since the same
    * src used for an array of vec1 cannot also be used for an
    * array of vec4.
    */
   struct hash_table *addr0_ht[4];
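   /* Sketch of the assumed lookup (the real logic lives in ir3_get_addr0(),
    * declared below; details here are illustrative), with one table per
    * alignment, keyed by the src instruction:
    *
    *    struct hash_table *ht = ctx->addr0_ht[align - 1];
    *    struct hash_entry *entry = _mesa_hash_table_search(ht, src);
    *    if (entry)
    *       return entry->data;   // reuse previously generated address
    */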

   /* The same for a1.x. We only support immediate values for a1.x, as this
    * is the only use so far.
    */
   struct hash_table_u64 *addr1_ht;

   struct hash_table *sel_cond_conversions;

   /* Last dst array; for indirect writes we need to insert a var-store.
    */
   struct ir3_instruction **last_dst;
   unsigned last_dst_n;

   /* maps nir_block to ir3_block, mostly for the purpose of
    * figuring out a block's successors
    */
   struct hash_table *block_ht;

   /* maps the nir_block at the top of a loop to the ir3_block collecting
    * continue edges.
    */
   struct hash_table *continue_block_ht;

   /* on a4xx, bitmask of samplers which need the astc+srgb workaround: */
   unsigned astc_srgb;

   unsigned samples; /* bitmask of x,y sample shifts */

   unsigned max_texture_index;

   unsigned prefetch_limit;

   /* set if we encounter something we can't handle yet, so we
    * can bail cleanly and fall back to the TGSI compiler front-end
    */
   bool error;
};

struct ir3_context_funcs {
   void (*emit_intrinsic_load_ssbo)(struct ir3_context *ctx,
                                    nir_intrinsic_instr *intr,
                                    struct ir3_instruction **dst);
   void (*emit_intrinsic_store_ssbo)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr);
   struct ir3_instruction *(*emit_intrinsic_atomic_ssbo)(
      struct ir3_context *ctx, nir_intrinsic_instr *intr);
   void (*emit_intrinsic_load_image)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr,
                                     struct ir3_instruction **dst);
   void (*emit_intrinsic_store_image)(struct ir3_context *ctx,
                                      nir_intrinsic_instr *intr);
   struct ir3_instruction *(*emit_intrinsic_atomic_image)(
      struct ir3_context *ctx, nir_intrinsic_instr *intr);
   void (*emit_intrinsic_image_size)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr,
                                     struct ir3_instruction **dst);
   void (*emit_intrinsic_load_global_ir3)(struct ir3_context *ctx,
                                          nir_intrinsic_instr *intr,
                                          struct ir3_instruction **dst);
   void (*emit_intrinsic_store_global_ir3)(struct ir3_context *ctx,
                                           nir_intrinsic_instr *intr);
};
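/* Assumed dispatch pattern (illustrative): per-generation backends fill
 * in this vtable, and generation-independent code calls through
 * ctx->funcs, e.g.:
 *
 *    ctx->funcs->emit_intrinsic_load_ssbo(ctx, intr, dst);
 */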

extern const struct ir3_context_funcs ir3_a4xx_funcs;
extern const struct ir3_context_funcs ir3_a6xx_funcs;

struct ir3_context *ir3_context_init(struct ir3_compiler *compiler,
                                     struct ir3_shader_variant *so);
void ir3_context_free(struct ir3_context *ctx);
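/* Assumed caller-side lifecycle sketch (illustrative only):
 *
 *    struct ir3_context *ctx = ir3_context_init(compiler, so);
 *    ... translate NIR instructions into ctx->ir ...
 *    ir3_context_free(ctx);
 */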

struct ir3_instruction **ir3_get_dst_ssa(struct ir3_context *ctx,
                                         nir_ssa_def *dst, unsigned n);
struct ir3_instruction **ir3_get_dst(struct ir3_context *ctx, nir_dest *dst,
                                     unsigned n);
struct ir3_instruction *const *ir3_get_src(struct ir3_context *ctx,
                                           nir_src *src);
void ir3_put_dst(struct ir3_context *ctx, nir_dest *dst);
struct ir3_instruction *ir3_create_collect(struct ir3_context *ctx,
                                           struct ir3_instruction *const *arr,
                                           unsigned arrsz);
void ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
                    struct ir3_instruction *src, unsigned base, unsigned n);
void ir3_handle_bindless_cat6(struct ir3_instruction *instr, nir_src rsrc);
void ir3_handle_nonuniform(struct ir3_instruction *instr,
                           nir_intrinsic_instr *intrin);
void emit_intrinsic_image_size_tex(struct ir3_context *ctx,
                                   nir_intrinsic_instr *intr,
                                   struct ir3_instruction **dst);

#define ir3_collect(ctx, ...)                                                  \
   ({                                                                          \
      struct ir3_instruction *__arr[] = {__VA_ARGS__};                         \
      ir3_create_collect(ctx, __arr, ARRAY_SIZE(__arr));                       \
   })
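/* Usage sketch: build a vec3 collect from three scalar ir3 values
 * (variable names are hypothetical):
 *
 *    struct ir3_instruction *xyz = ir3_collect(ctx, x, y, z);
 */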

NORETURN void ir3_context_error(struct ir3_context *ctx, const char *format,
                                ...);

#define compile_assert(ctx, cond)                                              \
   do {                                                                        \
      if (!(cond))                                                             \
         ir3_context_error((ctx), "failed assert: " #cond "\n");               \
   } while (0)
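/* Usage sketch (hypothetical condition): bails out of compilation
 * cleanly via ir3_context_error() instead of crashing:
 *
 *    compile_assert(ctx, intr->num_components <= 4);
 */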

struct ir3_instruction *ir3_get_addr0(struct ir3_context *ctx,
                                      struct ir3_instruction *src, int align);
struct ir3_instruction *ir3_get_addr1(struct ir3_context *ctx,
                                      unsigned const_val);
struct ir3_instruction *ir3_get_predicate(struct ir3_context *ctx,
                                          struct ir3_instruction *src);

void ir3_declare_array(struct ir3_context *ctx, nir_register *reg);
struct ir3_array *ir3_get_array(struct ir3_context *ctx, nir_register *reg);
struct ir3_instruction *ir3_create_array_load(struct ir3_context *ctx,
                                              struct ir3_array *arr, int n,
                                              struct ir3_instruction *address);
void ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr,
                            int n, struct ir3_instruction *src,
                            struct ir3_instruction *address);

static inline type_t
utype_for_size(unsigned bit_size)
{
   switch (bit_size) {
   case 32:
      return TYPE_U32;
   case 16:
      return TYPE_U16;
   case 8:
      return TYPE_U8;
   default:
      unreachable("bad bitsize");
      return ~0;
   }
}

static inline type_t
utype_src(nir_src src)
{
   return utype_for_size(nir_src_bit_size(src));
}

static inline type_t
utype_dst(nir_dest dst)
{
   return utype_for_size(nir_dest_bit_size(dst));
}

#endif /* IR3_CONTEXT_H_ */