Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/freedreno/ir3/ir3_compiler.h
4565 views
1
/*
 * Copyright (C) 2013 Rob Clark <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */
26
27
#ifndef IR3_COMPILER_H_
28
#define IR3_COMPILER_H_
29
30
#include "util/disk_cache.h"
31
#include "util/log.h"
32
33
#include "ir3.h"
34
35
struct ir3_ra_reg_set;
36
struct ir3_shader;
37
38
/* Compiler instance: device handle plus per-generation configuration and
 * hardware limits consulted throughout ir3 compilation.
 */
struct ir3_compiler {
   struct fd_device *dev;
   uint32_t gpu_id;
   /* Monotonic count of shaders created with this compiler; presumably used
    * as a unique shader id — TODO confirm against ir3_shader creation.
    */
   uint32_t shader_count;

   struct disk_cache *disk_cache;

   /* If true, UBO accesses are assumed to be bounds-checked as defined by
    * VK_EXT_robustness2 and optimizations may have to be more conservative.
    */
   bool robust_ubo_access;

   /*
    * Configuration options for things that are handled differently on
    * different generations:
    */

   /* a4xx (and later) drops SP_FS_FLAT_SHAD_MODE_REG_* for flat-interpolate
    * so we need to use ldlv.u32 to load the varying directly:
    */
   bool flat_bypass;

   /* on a3xx, we need to add one to # of array levels:
    */
   bool levels_add_one;

   /* on a3xx, we need to scale up integer coords for isaml based
    * on LoD:
    */
   bool unminify_coords;

   /* on a3xx do txf_ms w/ isaml and scaled coords: */
   bool txf_ms_with_isaml;

   /* on a4xx, for array textures we need to add 0.5 to the array
    * index coordinate:
    */
   bool array_index_add_half;

   /* on a6xx, rewrite samgp to sequence of samgq0-3 in vertex shaders:
    */
   bool samgq_workaround;

   /* on a650, vertex shader <-> tess control io uses LDL/STL */
   bool tess_use_shared;

   /* The maximum number of constants, in vec4's, across the entire graphics
    * pipeline.
    */
   uint16_t max_const_pipeline;

   /* The maximum number of constants, in vec4's, for VS+HS+DS+GS. */
   uint16_t max_const_geom;

   /* The maximum number of constants, in vec4's, for FS. */
   uint16_t max_const_frag;

   /* A "safe" max constlen that can be applied to each shader in the
    * pipeline which we guarantee will never exceed any combined limits.
    */
   uint16_t max_const_safe;

   /* The maximum number of constants, in vec4's, for compute shaders. */
   uint16_t max_const_compute;

   /* Number of instructions that the shader's base address and length
    * (instrlen divides instruction count by this) must be aligned to.
    */
   uint32_t instr_align;

   /* on a3xx, the unit of indirect const load is higher than later gens (in
    * vec4 units):
    */
   uint32_t const_upload_unit;

   /* The base number of threads per wave. Some stages may be able to double
    * this.
    */
   uint32_t threadsize_base;

   /* On at least a6xx, waves are always launched in pairs. In calculations
    * about occupancy, we pretend that each wave pair is actually one wave,
    * which simplifies many of the calculations, but means we have to
    * multiply threadsize_base by this number.
    */
   uint32_t wave_granularity;

   /* The maximum number of simultaneous waves per core. */
   uint32_t max_waves;

   /* This is theoretical maximum number of vec4 registers that one wave of
    * the base threadsize could use. To get the actual size of the register
    * file in bytes one would need to compute:
    *
    * reg_size_vec4 * threadsize_base * wave_granularity * 16 (bytes per vec4)
    *
    * However this number is more often what we actually need. For example, a
    * max_reg more than half of this will result in a doubled threadsize
    * being impossible (because double-sized waves take up twice as many
    * registers). Also, the formula for the occupancy given a particular
    * register footprint is simpler.
    *
    * It is in vec4 units because the register file is allocated
    * with vec4 granularity, so it's in the same units as max_reg.
    */
   uint32_t reg_size_vec4;

   /* The size of local memory in bytes */
   uint32_t local_mem_size;

   /* The number of total branch stack entries, divided by wave_granularity. */
   uint32_t branchstack_size;

   /* Whether clip+cull distances are supported */
   bool has_clip_cull;

   /* Whether private memory is supported */
   bool has_pvtmem;
};
157
158
/* Destroy a compiler instance created by ir3_compiler_create(). */
void ir3_compiler_destroy(struct ir3_compiler *compiler);

/* Create a compiler instance for the given device/gpu_id; see
 * ir3_compiler::robust_ubo_access for the meaning of the flag.
 */
struct ir3_compiler *ir3_compiler_create(struct fd_device *dev, uint32_t gpu_id,
                                         bool robust_ubo_access);

/* Shader disk-cache: initialize the cache, derive a shader's cache key, and
 * look up / store compiled variants.
 */
void ir3_disk_cache_init(struct ir3_compiler *compiler);
void ir3_disk_cache_init_shader_key(struct ir3_compiler *compiler,
                                    struct ir3_shader *shader);
/* Returns true on a cache hit (variant populated from the disk cache). */
bool ir3_disk_cache_retrieve(struct ir3_compiler *compiler,
                             struct ir3_shader_variant *v);
void ir3_disk_cache_store(struct ir3_compiler *compiler,
                          struct ir3_shader_variant *v);

/* Compile the variant's NIR to ir3; returns 0 on success, non-zero on
 * failure — TODO confirm exact error-code convention against callers.
 */
int ir3_compile_shader_nir(struct ir3_compiler *compiler,
                           struct ir3_shader_variant *so);
172
173
/* gpu pointer size in units of 32bit registers/slots */
174
static inline unsigned
175
ir3_pointer_size(struct ir3_compiler *compiler)
176
{
177
return (compiler->gpu_id >= 500) ? 2 : 1;
178
}
179
180
/* Bitmask of debug options, normally parsed from an environment debug
 * variable; consulted via the ir3_shader_debug global below.
 */
enum ir3_shader_debug {
   /* Per-stage flags enabling debug output for that shader stage: */
   IR3_DBG_SHADER_VS = BITFIELD_BIT(0),
   IR3_DBG_SHADER_TCS = BITFIELD_BIT(1),
   IR3_DBG_SHADER_TES = BITFIELD_BIT(2),
   IR3_DBG_SHADER_GS = BITFIELD_BIT(3),
   IR3_DBG_SHADER_FS = BITFIELD_BIT(4),
   IR3_DBG_SHADER_CS = BITFIELD_BIT(5),
   /* Enables debug for every stage (see shader_debug_enabled()): */
   IR3_DBG_DISASM = BITFIELD_BIT(6),
   /* Print the IR after each optimization pass (see ir3_debug_print()): */
   IR3_DBG_OPTMSGS = BITFIELD_BIT(7),
   IR3_DBG_FORCES2EN = BITFIELD_BIT(8),
   IR3_DBG_NOUBOOPT = BITFIELD_BIT(9),
   IR3_DBG_NOFP16 = BITFIELD_BIT(10),
   IR3_DBG_NOCACHE = BITFIELD_BIT(11),

   /* DEBUG-only options: */
   IR3_DBG_SCHEDMSGS = BITFIELD_BIT(20),
   IR3_DBG_RAMSGS = BITFIELD_BIT(21),

   /* Only used for the disk-caching logic: */
   IR3_DBG_ROBUST_UBO_ACCESS = BITFIELD_BIT(30),
};

/* Global debug flags; defined in the compiler implementation. */
extern enum ir3_shader_debug ir3_shader_debug;
/* Optional path to override compiled shaders from disk; NULL when unset —
 * TODO confirm NULL default against the definition site.
 */
extern const char *ir3_shader_override_path;
204
205
static inline bool
206
shader_debug_enabled(gl_shader_stage type)
207
{
208
if (ir3_shader_debug & IR3_DBG_DISASM)
209
return true;
210
211
switch (type) {
212
case MESA_SHADER_VERTEX:
213
return !!(ir3_shader_debug & IR3_DBG_SHADER_VS);
214
case MESA_SHADER_TESS_CTRL:
215
return !!(ir3_shader_debug & IR3_DBG_SHADER_TCS);
216
case MESA_SHADER_TESS_EVAL:
217
return !!(ir3_shader_debug & IR3_DBG_SHADER_TES);
218
case MESA_SHADER_GEOMETRY:
219
return !!(ir3_shader_debug & IR3_DBG_SHADER_GS);
220
case MESA_SHADER_FRAGMENT:
221
return !!(ir3_shader_debug & IR3_DBG_SHADER_FS);
222
case MESA_SHADER_COMPUTE:
223
return !!(ir3_shader_debug & IR3_DBG_SHADER_CS);
224
default:
225
debug_assert(0);
226
return false;
227
}
228
}
229
230
static inline void
231
ir3_debug_print(struct ir3 *ir, const char *when)
232
{
233
if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
234
mesa_logi("%s:", when);
235
ir3_print(ir);
236
}
237
}
238
239
#endif /* IR3_COMPILER_H_ */
240
241