GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/iris/iris_draw.c
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_draw.c
 *
 * The main driver hooks for drawing and launching compute shaders.
 */

#include <stdio.h>
#include <errno.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_draw.h"
#include "util/u_inlines.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"
#include "intel/compiler/brw_compiler.h"
#include "intel/compiler/brw_eu_defines.h"
#include "iris_context.h"
#include "iris_defines.h"

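/**
 * Returns true if the primitive will be rasterized as points or lines,
 * which the caller uses to decide the XY clip enables in the CLIP packet.
 */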
static bool
prim_is_points_or_lines(const struct pipe_draw_info *draw)
{
   /* We don't need to worry about adjacency - it can only be used with
    * geometry shaders, and we don't care about this info when GS is on.
    */
   return draw->mode == PIPE_PRIM_POINTS ||
          draw->mode == PIPE_PRIM_LINES ||
          draw->mode == PIPE_PRIM_LINE_LOOP ||
          draw->mode == PIPE_PRIM_LINE_STRIP;
}

/**
 * Record the current primitive mode and restart information, flagging
 * related packets as dirty if necessary.
 *
 * This must be called before updating compiled shaders, because the patch
 * information informs the TCS key.
 */
static void
iris_update_draw_info(struct iris_context *ice,
                      const struct pipe_draw_info *info)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;

   if (ice->state.prim_mode != info->mode) {
      ice->state.prim_mode = info->mode;
      ice->state.dirty |= IRIS_DIRTY_VF_TOPOLOGY;

      /* For XY Clip enables */
      bool points_or_lines = prim_is_points_or_lines(info);
      if (points_or_lines != ice->state.prim_is_points_or_lines) {
         ice->state.prim_is_points_or_lines = points_or_lines;
         ice->state.dirty |= IRIS_DIRTY_CLIP;
      }
   }

   if (info->mode == PIPE_PRIM_PATCHES &&
       ice->state.vertices_per_patch != info->vertices_per_patch) {
      ice->state.vertices_per_patch = info->vertices_per_patch;
      ice->state.dirty |= IRIS_DIRTY_VF_TOPOLOGY;

      /* 8_PATCH TCS needs this for key->input_vertices */
      if (compiler->use_tcs_8_patch)
         ice->state.stage_dirty |= IRIS_STAGE_DIRTY_UNCOMPILED_TCS;

      /* Flag constants dirty for gl_PatchVerticesIn if needed. */
      const struct shader_info *tcs_info =
         iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
      if (tcs_info &&
          BITSET_TEST(tcs_info->system_values_read, SYSTEM_VALUE_VERTICES_IN)) {
         ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TCS;
         ice->state.shaders[MESA_SHADER_TESS_CTRL].sysvals_need_upload = true;
      }
   }

   /* Track restart_index changes only if primitive_restart is true */
   const unsigned cut_index = info->primitive_restart ? info->restart_index :
                                                        ice->state.cut_index;
   if (ice->state.primitive_restart != info->primitive_restart ||
       ice->state.cut_index != cut_index) {
      ice->state.dirty |= IRIS_DIRTY_VF;
      ice->state.primitive_restart = info->primitive_restart;
      ice->state.cut_index = cut_index;
   }
}

/**
 * Update shader draw parameters, flagging VF packets as dirty if necessary.
 */
static void
iris_update_draw_parameters(struct iris_context *ice,
                            const struct pipe_draw_info *info,
                            unsigned drawid_offset,
                            const struct pipe_draw_indirect_info *indirect,
                            const struct pipe_draw_start_count_bias *draw)
{
   bool changed = false;

   if (ice->state.vs_uses_draw_params) {
      struct iris_state_ref *draw_params = &ice->draw.draw_params;

      if (indirect && indirect->buffer) {
         pipe_resource_reference(&draw_params->res, indirect->buffer);
         /* Point at the firstvertex/baseinstance fields of the indirect
          * draw command (byte offset 12 for indexed draws, 8 otherwise).
          */
         draw_params->offset =
            indirect->offset + (info->index_size ? 12 : 8);

         changed = true;
         ice->draw.params_valid = false;
      } else {
         int firstvertex = info->index_size ? draw->index_bias : draw->start;

         if (!ice->draw.params_valid ||
             ice->draw.params.firstvertex != firstvertex ||
             ice->draw.params.baseinstance != info->start_instance) {

            changed = true;
            ice->draw.params.firstvertex = firstvertex;
            ice->draw.params.baseinstance = info->start_instance;
            ice->draw.params_valid = true;

            u_upload_data(ice->ctx.const_uploader, 0,
                          sizeof(ice->draw.params), 4, &ice->draw.params,
                          &draw_params->offset, &draw_params->res);
         }
      }
   }

   if (ice->state.vs_uses_derived_draw_params) {
      struct iris_state_ref *derived_params = &ice->draw.derived_draw_params;
      /* All bits set for indexed draws, zero otherwise. */
      int is_indexed_draw = info->index_size ? -1 : 0;

      if (ice->draw.derived_params.drawid != drawid_offset ||
          ice->draw.derived_params.is_indexed_draw != is_indexed_draw) {

         changed = true;
         ice->draw.derived_params.drawid = drawid_offset;
         ice->draw.derived_params.is_indexed_draw = is_indexed_draw;

         u_upload_data(ice->ctx.const_uploader, 0,
                       sizeof(ice->draw.derived_params), 4,
                       &ice->draw.derived_params,
                       &derived_params->offset, &derived_params->res);
      }
   }

   if (changed) {
      ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
                          IRIS_DIRTY_VERTEX_ELEMENTS |
                          IRIS_DIRTY_VF_SGVS;
   }
}

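/**
 * Performs an indirect draw: emits render state for each of the indirect
 * draws, advancing through the indirect buffer by its stride.  When an
 * indirect draw count is combined with predication, the predicate result
 * is saved in GPR15 across the loop and restored afterwards.
 */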
static void
iris_indirect_draw_vbo(struct iris_context *ice,
                       const struct pipe_draw_info *dinfo,
                       unsigned drawid_offset,
                       const struct pipe_draw_indirect_info *dindirect,
                       const struct pipe_draw_start_count_bias *draw)
{
   struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
   struct pipe_draw_info info = *dinfo;
   struct pipe_draw_indirect_info indirect = *dindirect;

   if (indirect.indirect_draw_count &&
       ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
      /* Upload MI_PREDICATE_RESULT to GPR15. */
      batch->screen->vtbl.load_register_reg64(batch, CS_GPR(15), MI_PREDICATE_RESULT);
   }

   const uint64_t orig_dirty = ice->state.dirty;
   const uint64_t orig_stage_dirty = ice->state.stage_dirty;

   for (int i = 0; i < indirect.draw_count; i++) {
      iris_batch_maybe_flush(batch, 1500);

      iris_update_draw_parameters(ice, &info, drawid_offset + i, &indirect, draw);

      batch->screen->vtbl.upload_render_state(ice, batch, &info, drawid_offset + i, &indirect, draw);

      ice->state.dirty &= ~IRIS_ALL_DIRTY_FOR_RENDER;
      ice->state.stage_dirty &= ~IRIS_ALL_STAGE_DIRTY_FOR_RENDER;

      indirect.offset += indirect.stride;
   }

   if (indirect.indirect_draw_count &&
       ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
      /* Restore MI_PREDICATE_RESULT. */
      batch->screen->vtbl.load_register_reg64(batch, MI_PREDICATE_RESULT, CS_GPR(15));
   }

   /* Put this back for post-draw resolves, we'll clear it again after. */
   ice->state.dirty = orig_dirty;
   ice->state.stage_dirty = orig_stage_dirty;
}

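/**
 * Performs a single direct draw, uploading any changed draw parameters
 * and emitting the render state for the draw.
 */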
static void
iris_simple_draw_vbo(struct iris_context *ice,
                     const struct pipe_draw_info *draw,
                     unsigned drawid_offset,
                     const struct pipe_draw_indirect_info *indirect,
                     const struct pipe_draw_start_count_bias *sc)
{
   struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];

   iris_batch_maybe_flush(batch, 1500);

   iris_update_draw_parameters(ice, draw, drawid_offset, indirect, sc);

   batch->screen->vtbl.upload_render_state(ice, batch, draw, drawid_offset, indirect, sc);
}

/**
 * The pipe->draw_vbo() driver hook.  Performs a draw on the GPU.
 */
void
iris_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info,
              unsigned drawid_offset,
              const struct pipe_draw_indirect_info *indirect,
              const struct pipe_draw_start_count_bias *draws,
              unsigned num_draws)
{
   if (num_draws > 1) {
      /* Split multi-draws into a series of single draws. */
      util_draw_multi(ctx, info, drawid_offset, indirect, draws, num_draws);
      return;
   }

   if (!indirect && (!draws[0].count || !info->instance_count))
      return;

   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen*)ice->ctx.screen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];

   if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
      return;

   if (INTEL_DEBUG & DEBUG_REEMIT) {
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER;
      ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_RENDER;
   }

   iris_update_draw_info(ice, info);

   if (devinfo->ver == 9)
      gfx9_toggle_preemption(ice, batch, info);

   iris_update_compiled_shaders(ice);

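   /* Resolve any pending auxiliary (fast-clear/compression) state on bound
    * shader inputs and the framebuffer before recording the draw, so the
    * 3D pipeline reads coherent data.
    */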
   if (ice->state.dirty & IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES) {
      bool draw_aux_buffer_disabled[BRW_MAX_DRAW_BUFFERS] = { };
      for (gl_shader_stage stage = 0; stage < MESA_SHADER_COMPUTE; stage++) {
         if (ice->shaders.prog[stage])
            iris_predraw_resolve_inputs(ice, batch, draw_aux_buffer_disabled,
                                        stage, true);
      }
      iris_predraw_resolve_framebuffer(ice, batch, draw_aux_buffer_disabled);
   }

   iris_binder_reserve_3d(ice);

   batch->screen->vtbl.update_surface_base_address(batch, &ice->state.binder);

   iris_handle_always_flush_cache(batch);

   if (indirect && indirect->buffer)
      iris_indirect_draw_vbo(ice, info, drawid_offset, indirect, &draws[0]);
   else
      iris_simple_draw_vbo(ice, info, drawid_offset, indirect, &draws[0]);

   iris_handle_always_flush_cache(batch);

   iris_postdraw_update_resolve_tracking(ice, batch);

   ice->state.dirty &= ~IRIS_ALL_DIRTY_FOR_RENDER;
   ice->state.stage_dirty &= ~IRIS_ALL_STAGE_DIRTY_FOR_RENDER;
}

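/**
 * Makes the grid size available to the compute shader: references the
 * indirect dispatch buffer directly, or uploads the grid dimensions, and
 * (re)creates a RAW buffer surface for it when the kernel actually reads
 * the work group count.
 */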
static void
iris_update_grid_size_resource(struct iris_context *ice,
                               const struct pipe_grid_info *grid)
{
   const struct iris_screen *screen = (void *) ice->ctx.screen;
   const struct isl_device *isl_dev = &screen->isl_dev;
   struct iris_state_ref *grid_ref = &ice->state.grid_size;
   struct iris_state_ref *state_ref = &ice->state.grid_surf_state;

   const struct iris_compiled_shader *shader = ice->shaders.prog[MESA_SHADER_COMPUTE];
   bool grid_needs_surface = shader->bt.used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS];
   bool grid_updated = false;

   if (grid->indirect) {
      pipe_resource_reference(&grid_ref->res, grid->indirect);
      grid_ref->offset = grid->indirect_offset;

      /* Zero out the grid size so that the next non-indirect grid launch will
       * re-upload it properly.
       */
      memset(ice->state.last_grid, 0, sizeof(ice->state.last_grid));
      grid_updated = true;
   } else if (memcmp(ice->state.last_grid, grid->grid, sizeof(grid->grid)) != 0) {
      memcpy(ice->state.last_grid, grid->grid, sizeof(grid->grid));
      u_upload_data(ice->state.dynamic_uploader, 0, sizeof(grid->grid), 4,
                    grid->grid, &grid_ref->offset, &grid_ref->res);
      grid_updated = true;
   }

   /* If we changed the grid, the old surface state is invalid. */
   if (grid_updated)
      pipe_resource_reference(&state_ref->res, NULL);

   /* Skip surface upload if we don't need it or we already have one */
   if (!grid_needs_surface || state_ref->res)
      return;

   struct iris_bo *grid_bo = iris_resource_bo(grid_ref->res);

   void *surf_map = NULL;
   u_upload_alloc(ice->state.surface_uploader, 0, isl_dev->ss.size,
                  isl_dev->ss.align, &state_ref->offset, &state_ref->res,
                  &surf_map);
   state_ref->offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(state_ref->res));
   isl_buffer_fill_state(&screen->isl_dev, surf_map,
                         .address = grid_ref->offset + grid_bo->gtt_offset,
                         .size_B = sizeof(grid->grid),
                         .format = ISL_FORMAT_RAW,
                         .stride_B = 1,
                         .mocs = iris_mocs(grid_bo, isl_dev,
                                           ISL_SURF_USAGE_CONSTANT_BUFFER_BIT));

   ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_CS;
}

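/**
 * The pipe->launch_grid() driver hook.  Dispatches a compute grid on
 * the GPU.
 */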
void
iris_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info *grid)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_batch *batch = &ice->batches[IRIS_BATCH_COMPUTE];

   if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
      return;

   if (INTEL_DEBUG & DEBUG_REEMIT) {
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;
      ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE;
   }

   if (ice->state.dirty & IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES)
      iris_predraw_resolve_inputs(ice, batch, NULL, MESA_SHADER_COMPUTE, false);

   iris_batch_maybe_flush(batch, 1500);

   iris_update_compiled_compute_shader(ice);

   if (memcmp(ice->state.last_block, grid->block, sizeof(grid->block)) != 0) {
      memcpy(ice->state.last_block, grid->block, sizeof(grid->block));
      ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_CS;
      ice->state.shaders[MESA_SHADER_COMPUTE].sysvals_need_upload = true;
   }

   iris_update_grid_size_resource(ice, grid);

   iris_binder_reserve_compute(ice);
   batch->screen->vtbl.update_surface_base_address(batch, &ice->state.binder);

   if (ice->state.compute_predicate) {
      /* Load the predication result for this dispatch, then clear it;
       * it only applies to this launch.
       */
      batch->screen->vtbl.load_register_mem64(batch, MI_PREDICATE_RESULT,
                                              ice->state.compute_predicate, 0);
      ice->state.compute_predicate = NULL;
   }

   iris_handle_always_flush_cache(batch);

   batch->screen->vtbl.upload_compute_state(ice, batch, grid);

   iris_handle_always_flush_cache(batch);

   ice->state.dirty &= ~IRIS_ALL_DIRTY_FOR_COMPUTE;
   ice->state.stage_dirty &= ~IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE;

   /* Note: since compute shaders can't access the framebuffer, there's
    * no need to call iris_postdraw_update_resolve_tracking.
    */
}