Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/freedreno/a6xx/fd6_draw.c
4574 views
1
/*
2
* Copyright (C) 2016 Rob Clark <[email protected]>
3
* Copyright © 2018 Google, Inc.
4
*
5
* Permission is hereby granted, free of charge, to any person obtaining a
6
* copy of this software and associated documentation files (the "Software"),
7
* to deal in the Software without restriction, including without limitation
8
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
9
* and/or sell copies of the Software, and to permit persons to whom the
10
* Software is furnished to do so, subject to the following conditions:
11
*
12
* The above copyright notice and this permission notice (including the next
13
* paragraph) shall be included in all copies or substantial portions of the
14
* Software.
15
*
16
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22
* SOFTWARE.
23
*
24
* Authors:
25
* Rob Clark <[email protected]>
26
*/
27
28
#include "pipe/p_state.h"
29
#include "util/u_memory.h"
30
#include "util/u_prim.h"
31
#include "util/u_string.h"
32
33
#include "freedreno_resource.h"
34
#include "freedreno_state.h"
35
36
#include "fd6_context.h"
37
#include "fd6_draw.h"
38
#include "fd6_emit.h"
39
#include "fd6_format.h"
40
#include "fd6_program.h"
41
#include "fd6_vsc.h"
42
#include "fd6_zsa.h"
43
44
#include "fd6_pack.h"
45
46
/* Emit a transform-feedback-sourced draw (CP_DRAW_AUTO): the vertex count
 * is derived by the CP from the byte counter recorded in the stream-output
 * target's offset buffer, rather than supplied by the CPU.
 */
static void
draw_emit_xfb(struct fd_ringbuffer *ring, struct CP_DRAW_INDX_OFFSET_0 *draw0,
              const struct pipe_draw_info *info,
              const struct pipe_draw_indirect_info *indirect)
{
   struct fd_stream_output_target *target =
      fd_stream_output_target(indirect->count_from_stream_output);
   /* GPU buffer holding the byte counter written at end-of-xfb: */
   struct fd_resource *offset = fd_resource(target->offset_buf);

   /* All known firmware versions do not wait for WFI's with CP_DRAW_AUTO.
    * Plus, for the common case where the counter buffer is written by
    * vkCmdEndTransformFeedback, we need to wait for the CP_WAIT_MEM_WRITES to
    * complete which means we need a WAIT_FOR_ME anyway.
    */
   OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

   OUT_PKT7(ring, CP_DRAW_AUTO, 6);
   OUT_RING(ring, pack_CP_DRAW_INDX_OFFSET_0(*draw0).value);
   OUT_RING(ring, info->instance_count);
   OUT_RELOC(ring, offset->bo, 0, 0, 0);
   OUT_RING(
      ring,
      0); /* byte counter offset subtracted from the value read from above */
   OUT_RING(ring, target->stride);
}
71
72
/* Emit an indirect draw: the draw parameters (counts, offsets) live in a GPU
 * buffer (indirect->buffer) and are consumed directly by the CP.  Indexed and
 * non-indexed draws use different packets.
 */
static void
draw_emit_indirect(struct fd_ringbuffer *ring,
                   struct CP_DRAW_INDX_OFFSET_0 *draw0,
                   const struct pipe_draw_info *info,
                   const struct pipe_draw_indirect_info *indirect,
                   unsigned index_offset)
{
   struct fd_resource *ind = fd_resource(indirect->buffer);

   if (info->index_size) {
      struct pipe_resource *idx = info->index.resource;
      /* Clamp so the GPU cannot read past the end of the index buffer,
       * regardless of what the indirect params request:
       */
      unsigned max_indices = (idx->width0 - index_offset) / info->index_size;

      OUT_PKT(ring, CP_DRAW_INDX_INDIRECT, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
              A5XX_CP_DRAW_INDX_INDIRECT_INDX_BASE(fd_resource(idx)->bo,
                                                   index_offset),
              A5XX_CP_DRAW_INDX_INDIRECT_3(.max_indices = max_indices),
              A5XX_CP_DRAW_INDX_INDIRECT_INDIRECT(ind->bo, indirect->offset));
   } else {
      OUT_PKT(ring, CP_DRAW_INDIRECT, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
              A5XX_CP_DRAW_INDIRECT_INDIRECT(ind->bo, indirect->offset));
   }
}
95
96
/* Emit a direct (CPU-supplied parameters) draw via CP_DRAW_INDX_OFFSET,
 * for both indexed and non-indexed cases.
 */
static void
draw_emit(struct fd_ringbuffer *ring, struct CP_DRAW_INDX_OFFSET_0 *draw0,
          const struct pipe_draw_info *info,
          const struct pipe_draw_start_count_bias *draw, unsigned index_offset)
{
   if (info->index_size) {
      /* User-pointer index buffers are lowered before we get here: */
      assert(!info->has_user_indices);

      struct pipe_resource *idx_buffer = info->index.resource;
      /* Clamp so the GPU cannot fetch indices past the end of the buffer: */
      unsigned max_indices =
         (idx_buffer->width0 - index_offset) / info->index_size;

      OUT_PKT(ring, CP_DRAW_INDX_OFFSET, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
              CP_DRAW_INDX_OFFSET_1(.num_instances = info->instance_count),
              CP_DRAW_INDX_OFFSET_2(.num_indices = draw->count),
              CP_DRAW_INDX_OFFSET_3(.first_indx = draw->start),
              A5XX_CP_DRAW_INDX_OFFSET_INDX_BASE(fd_resource(idx_buffer)->bo,
                                                 index_offset),
              A5XX_CP_DRAW_INDX_OFFSET_6(.max_indices = max_indices));
   } else {
      OUT_PKT(ring, CP_DRAW_INDX_OFFSET, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
              CP_DRAW_INDX_OFFSET_1(.num_instances = info->instance_count),
              CP_DRAW_INDX_OFFSET_2(.num_indices = draw->count));
   }
}
121
122
static void
123
fixup_draw_state(struct fd_context *ctx, struct fd6_emit *emit) assert_dt
124
{
125
if (ctx->last.dirty ||
126
(ctx->last.primitive_restart != emit->primitive_restart)) {
127
/* rasterizer state is effected by primitive-restart: */
128
fd_context_dirty(ctx, FD_DIRTY_RASTERIZER);
129
ctx->last.primitive_restart = emit->primitive_restart;
130
}
131
}
132
133
/* The a6xx draw_vbo entrypoint: builds the fd6_emit state object, resolves
 * the shader program variants, emits any per-draw registers that changed,
 * and finally emits the draw packet (direct, indirect, or xfb-sourced).
 *
 * Returns false to fall back to util_draw_vbo/st handling (missing shader
 * stages or failed compile); true once the draw has been emitted.
 */
static bool
fd6_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
             unsigned drawid_offset,
             const struct pipe_draw_indirect_info *indirect,
             const struct pipe_draw_start_count_bias *draw,
             unsigned index_offset) assert_dt
{
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   struct shader_info *gs_info = ir3_get_shader_info(ctx->prog.gs);
   struct fd6_emit emit = {
      .ctx = ctx,
      .vtx = &ctx->vtx,
      .info = info,
      .drawid_offset = drawid_offset,
      .indirect = indirect,
      .draw = draw,
      .key = {
         .vs = ctx->prog.vs,
         .gs = ctx->prog.gs,
         .fs = ctx->prog.fs,
         .key = {
            .rasterflat = ctx->rasterizer->flatshade,
            /* gl_Layer comes from GS; without one (or if the GS doesn't
             * write LAYER) the FS key forces layer==0:
             */
            .layer_zero = !gs_info || !(gs_info->outputs_written & VARYING_BIT_LAYER),
            .sample_shading = (ctx->min_samples > 1),
            .msaa = (ctx->framebuffer.samples > 1),
         },
      },
      .rasterflat = ctx->rasterizer->flatshade,
      .sprite_coord_enable = ctx->rasterizer->sprite_coord_enable,
      .sprite_coord_mode = ctx->rasterizer->sprite_coord_mode,
      /* restart is only meaningful for indexed draws: */
      .primitive_restart = info->primitive_restart && info->index_size,
   };

   /* VS and FS are mandatory; bail to the fallback path without them: */
   if (!(ctx->prog.vs && ctx->prog.fs))
      return false;

   if (info->mode == PIPE_PRIM_PATCHES) {
      emit.key.hs = ctx->prog.hs;
      emit.key.ds = ctx->prog.ds;

      /* Tessellation requires both HS and DS: */
      if (!(ctx->prog.hs && ctx->prog.ds))
         return false;

      struct shader_info *ds_info = ir3_get_shader_info(emit.key.ds);
      emit.key.key.tessellation = ir3_tess_mode(ds_info->tess.primitive_mode);
      ctx->gen_dirty |= BIT(FD6_GROUP_PRIMITIVE_PARAMS);
   }

   if (emit.key.gs) {
      emit.key.key.has_gs = true;
      ctx->gen_dirty |= BIT(FD6_GROUP_PRIMITIVE_PARAMS);
   }

   /* VSC (visibility stream) size tracking only applies to plain direct
    * draws with no HS/DS/GS stages:
    */
   if (!(emit.key.hs || emit.key.ds || emit.key.gs || indirect))
      fd6_vsc_update_sizes(ctx->batch, info, draw);

   ir3_fixup_shader_state(&ctx->base, &emit.key.key);

   /* Reuse the cached program state unless shader state is dirty: */
   if (!(ctx->dirty & FD_DIRTY_PROG)) {
      emit.prog = fd6_ctx->prog;
   } else {
      fd6_ctx->prog = fd6_emit_get_prog(&emit);
   }

   /* bail if compile failed: */
   if (!fd6_ctx->prog)
      return false;

   fixup_draw_state(ctx, &emit);

   /* *after* fixup_shader_state(): */
   emit.dirty = ctx->dirty;
   emit.dirty_groups = ctx->gen_dirty;

   emit.bs = fd6_emit_get_prog(&emit)->bs;
   emit.vs = fd6_emit_get_prog(&emit)->vs;
   emit.hs = fd6_emit_get_prog(&emit)->hs;
   emit.ds = fd6_emit_get_prog(&emit)->ds;
   emit.gs = fd6_emit_get_prog(&emit)->gs;
   emit.fs = fd6_emit_get_prog(&emit)->fs;

   if (emit.vs->need_driver_params || fd6_ctx->has_dp_state)
      emit.dirty_groups |= BIT(FD6_GROUP_VS_DRIVER_PARAMS);

   /* If we are doing xfb, we need to emit the xfb state on every draw: */
   if (emit.prog->stream_output)
      emit.dirty_groups |= BIT(FD6_GROUP_SO);

   /* Accumulate per-stage register-usage stats only when someone is
    * actually querying them:
    */
   if (unlikely(ctx->stats_users > 0)) {
      ctx->stats.vs_regs += ir3_shader_halfregs(emit.vs);
      ctx->stats.hs_regs += COND(emit.hs, ir3_shader_halfregs(emit.hs));
      ctx->stats.ds_regs += COND(emit.ds, ir3_shader_halfregs(emit.ds));
      ctx->stats.gs_regs += COND(emit.gs, ir3_shader_halfregs(emit.gs));
      ctx->stats.fs_regs += ir3_shader_halfregs(emit.fs);
   }

   struct fd_ringbuffer *ring = ctx->batch->draw;

   struct CP_DRAW_INDX_OFFSET_0 draw0 = {
      .prim_type = ctx->primtypes[info->mode],
      .vis_cull = USE_VISIBILITY,
      .gs_enable = !!emit.key.gs,
   };

   /* Select where the draw's vertex count/indices come from: */
   if (indirect && indirect->count_from_stream_output) {
      draw0.source_select = DI_SRC_SEL_AUTO_XFB;
   } else if (info->index_size) {
      draw0.source_select = DI_SRC_SEL_DMA;
      draw0.index_size = fd4_size2indextype(info->index_size);
   } else {
      draw0.source_select = DI_SRC_SEL_AUTO_INDEX;
   }

   if (info->mode == PIPE_PRIM_PATCHES) {
      shader_info *ds_info = &emit.ds->shader->nir->info;
      uint32_t factor_stride;

      /* per-patch tess-factor stride depends on the output primitive;
       * NOTE(review): stride values (12/20/28 bytes) appear to be HW
       * defined — confirm against the a6xx docs/blob traces.
       */
      switch (ds_info->tess.primitive_mode) {
      case GL_ISOLINES:
         draw0.patch_type = TESS_ISOLINES;
         factor_stride = 12;
         break;
      case GL_TRIANGLES:
         draw0.patch_type = TESS_TRIANGLES;
         factor_stride = 20;
         break;
      case GL_QUADS:
         draw0.patch_type = TESS_QUADS;
         factor_stride = 28;
         break;
      default:
         unreachable("bad tessmode");
      }

      draw0.prim_type = DI_PT_PATCHES0 + info->vertices_per_patch;
      draw0.tess_enable = true;

      const unsigned max_count = 2048;
      unsigned count;

      /**
       * We can cap tessparam/tessfactor buffer sizes at the sub-draw
       * limit. But in the indirect-draw case we must assume the worst.
       */
      if (indirect && indirect->buffer) {
         count = ALIGN_NPOT(max_count, info->vertices_per_patch);
      } else {
         count = MIN2(max_count, draw->count);
         count = ALIGN_NPOT(count, info->vertices_per_patch);
      }

      OUT_PKT7(ring, CP_SET_SUBDRAW_SIZE, 1);
      OUT_RING(ring, count);

      ctx->batch->tessellation = true;
      /* Grow (never shrink) the batch's tess buffer size requirements: */
      ctx->batch->tessparam_size =
         MAX2(ctx->batch->tessparam_size, emit.hs->output_size * 4 * count);
      ctx->batch->tessfactor_size =
         MAX2(ctx->batch->tessfactor_size, factor_stride * count);

      if (!ctx->batch->tess_addrs_constobj) {
         /* Reserve space for the bo address - we'll write them later in
          * setup_tess_buffers(). We need 2 bo address, but indirect
          * constant upload needs at least 4 vec4s.
          */
         unsigned size = 4 * 16;

         ctx->batch->tess_addrs_constobj = fd_submit_new_ringbuffer(
            ctx->batch->submit, size, FD_RINGBUFFER_STREAMING);

         ctx->batch->tess_addrs_constobj->cur += size;
      }
   }

   /* Emit the handful of per-draw registers, but only when their value
    * actually changed since the last draw (or tracking is invalid):
    */
   uint32_t index_start = info->index_size ? draw->index_bias : draw->start;
   if (ctx->last.dirty || (ctx->last.index_start != index_start)) {
      OUT_PKT4(ring, REG_A6XX_VFD_INDEX_OFFSET, 1);
      OUT_RING(ring, index_start); /* VFD_INDEX_OFFSET */
      ctx->last.index_start = index_start;
   }

   if (ctx->last.dirty || (ctx->last.instance_start != info->start_instance)) {
      OUT_PKT4(ring, REG_A6XX_VFD_INSTANCE_START_OFFSET, 1);
      OUT_RING(ring, info->start_instance); /* VFD_INSTANCE_START_OFFSET */
      ctx->last.instance_start = info->start_instance;
   }

   uint32_t restart_index =
      info->primitive_restart ? info->restart_index : 0xffffffff;
   if (ctx->last.dirty || (ctx->last.restart_index != restart_index)) {
      OUT_PKT4(ring, REG_A6XX_PC_RESTART_INDEX, 1);
      OUT_RING(ring, restart_index); /* PC_RESTART_INDEX */
      ctx->last.restart_index = restart_index;
   }

   // TODO move fd6_emit_streamout.. I think..
   if (emit.dirty_groups)
      fd6_emit_state(ring, &emit);

   /* for debug after a lock up, write a unique counter value
    * to scratch7 for each draw, to make it easier to match up
    * register dumps to cmdstream. The combination of IB
    * (scratch6) and DRAW is enough to "triangulate" the
    * particular draw that caused lockup.
    */
   emit_marker6(ring, 7);

   if (indirect) {
      if (indirect->count_from_stream_output) {
         draw_emit_xfb(ring, &draw0, info, indirect);
      } else {
         draw_emit_indirect(ring, &draw0, info, indirect, index_offset);
      }
   } else {
      draw_emit(ring, &draw0, info, draw, index_offset);
   }

   emit_marker6(ring, 7);
   fd_reset_wfi(ctx->batch);

   /* Flush the streamout counters for each active SO buffer: */
   if (emit.streamout_mask) {
      struct fd_ringbuffer *ring = ctx->batch->draw;

      for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
         if (emit.streamout_mask & (1 << i)) {
            fd6_event_write(ctx->batch, ring, FLUSH_SO_0 + i, false);
         }
      }
   }

   /* Everything dirty has now been emitted: */
   fd_context_all_clean(ctx);

   return true;
}
367
368
/* Clear the LRZ (low-resolution Z) buffer of 'zsbuf' to 'depth' using a
 * 2D solid-fill blit emitted into the batch's prologue ringbuffer, so it
 * runs before any of the batch's draws.  The LRZ buffer is treated as a
 * FMT6_16_UNORM destination filled with the clear depth value.
 */
static void
fd6_clear_lrz(struct fd_batch *batch, struct fd_resource *zsbuf, double depth)
{
   struct fd_ringbuffer *ring;
   struct fd_screen *screen = batch->ctx->screen;

   ring = fd_batch_get_prologue(batch);

   /* Switch the CP into bypass (direct render) mode for the blit: */
   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
   emit_marker6(ring, 7);

   OUT_WFI5(ring);

   OUT_REG(ring, A6XX_RB_CCU_CNTL(.color_offset = screen->ccu_offset_bypass));

   /* Invalidate all stage/bindless state before reprogramming: */
   OUT_REG(ring,
           A6XX_HLSQ_INVALIDATE_CMD(.vs_state = true, .hs_state = true,
                                    .ds_state = true, .gs_state = true,
                                    .fs_state = true, .cs_state = true,
                                    .gfx_ibo = true, .cs_ibo = true,
                                    .gfx_shared_const = true,
                                    .gfx_bindless = 0x1f, .cs_bindless = 0x1f));

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BLIT2DSCALE));
   emit_marker6(ring, 7);

   OUT_PKT4(ring, REG_A6XX_RB_2D_UNKNOWN_8C01, 1);
   OUT_RING(ring, 0x0);

   /* No source surface — this is a solid-fill blit: */
   OUT_PKT4(ring, REG_A6XX_SP_PS_2D_SRC_INFO, 13);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A6XX_SP_2D_DST_FORMAT, 1);
   OUT_RING(ring, 0x0000f410);

   /* NOTE(review): 0x4f00080 is an undocumented magic bit pattern in the
    * 2D blit control registers — matches blob behavior, do not change.
    */
   OUT_PKT4(ring, REG_A6XX_GRAS_2D_BLIT_CNTL, 1);
   OUT_RING(ring,
            A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(FMT6_16_UNORM) | 0x4f00080);

   OUT_PKT4(ring, REG_A6XX_RB_2D_BLIT_CNTL, 1);
   OUT_RING(ring, A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(FMT6_16_UNORM) | 0x4f00080);

   fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
   fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);

   /* Solid-fill color: channel 0 carries the depth clear value: */
   OUT_PKT4(ring, REG_A6XX_RB_2D_SRC_SOLID_C0, 4);
   OUT_RING(ring, fui(depth));
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   /* Destination is the LRZ buffer, linear 16-bit: */
   OUT_PKT4(ring, REG_A6XX_RB_2D_DST_INFO, 9);
   OUT_RING(ring, A6XX_RB_2D_DST_INFO_COLOR_FORMAT(FMT6_16_UNORM) |
                     A6XX_RB_2D_DST_INFO_TILE_MODE(TILE6_LINEAR) |
                     A6XX_RB_2D_DST_INFO_COLOR_SWAP(WZYX));
   OUT_RELOC(ring, zsbuf->lrz, 0, 0, 0);
   OUT_RING(ring, A6XX_RB_2D_DST_PITCH(zsbuf->lrz_pitch * 2).value);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_REG(ring, A6XX_GRAS_2D_SRC_TL_X(0), A6XX_GRAS_2D_SRC_BR_X(0),
           A6XX_GRAS_2D_SRC_TL_Y(0), A6XX_GRAS_2D_SRC_BR_Y(0));

   /* Fill the full LRZ extent: */
   OUT_PKT4(ring, REG_A6XX_GRAS_2D_DST_TL, 2);
   OUT_RING(ring, A6XX_GRAS_2D_DST_TL_X(0) | A6XX_GRAS_2D_DST_TL_Y(0));
   OUT_RING(ring, A6XX_GRAS_2D_DST_BR_X(zsbuf->lrz_width - 1) |
                     A6XX_GRAS_2D_DST_BR_Y(zsbuf->lrz_height - 1));

   fd6_event_write(batch, ring, 0x3f, false);

   OUT_WFI5(ring);

   OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
   OUT_RING(ring, screen->info->a6xx.magic.RB_UNKNOWN_8E04_blit);

   /* Kick off the actual blit: */
   OUT_PKT7(ring, CP_BLIT, 1);
   OUT_RING(ring, CP_BLIT_0_OP(BLIT_OP_SCALE));

   OUT_WFI5(ring);

   OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_8E04, 1);
   OUT_RING(ring, 0x0); /* RB_UNKNOWN_8E04 */

   /* Flush caches so later depth reads see the cleared LRZ contents: */
   fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
   fd6_event_write(batch, ring, PC_CCU_FLUSH_DEPTH_TS, true);
   fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);

   fd6_cache_inv(batch, ring);
}
476
477
static bool
478
is_z32(enum pipe_format format)
479
{
480
switch (format) {
481
case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
482
case PIPE_FORMAT_Z32_UNORM:
483
case PIPE_FORMAT_Z32_FLOAT:
484
return true;
485
default:
486
return false;
487
}
488
}
489
490
/* The a6xx fast-clear hook: record the clear values on the batch so the
 * clear happens as part of the GMEM load, rather than emitting draw-based
 * clears.  Returns false to fall back to u_blitter 3D-pipe clears.
 */
static bool
fd6_clear(struct fd_context *ctx, unsigned buffers,
          const union pipe_color_union *color, double depth,
          unsigned stencil) assert_dt
{
   struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
   const bool has_depth = pfb->zsbuf;
   /* NOTE(review): shifting by 2 drops the depth/stencil bits, leaving a
    * per-MRT color-buffer mask — assumes PIPE_CLEAR_COLOR0 == BIT(2).
    */
   unsigned color_buffers = buffers >> 2;

   /* we need to do multisample clear on 3d pipe, so fallback to u_blitter: */
   if (pfb->samples > 1)
      return false;

   /* If we're clearing after draws, fallback to 3D pipe clears. We could
    * use blitter clears in the draw batch but then we'd have to patch up the
    * gmem offsets. This doesn't seem like a useful thing to optimize for
    * however.*/
   if (ctx->batch->num_draws > 0)
      return false;

   /* Record the clear values for the GMEM-load-time clear: */
   u_foreach_bit (i, color_buffers)
      ctx->batch->clear_color[i] = *color;
   if (buffers & PIPE_CLEAR_DEPTH)
      ctx->batch->clear_depth = depth;
   if (buffers & PIPE_CLEAR_STENCIL)
      ctx->batch->clear_stencil = stencil;

   ctx->batch->fast_cleared |= buffers;

   /* A full depth clear also re-validates the LRZ buffer (cleared to the
    * new depth value); z32 formats don't use LRZ here:
    */
   if (has_depth && (buffers & PIPE_CLEAR_DEPTH)) {
      struct fd_resource *zsbuf = fd_resource(pfb->zsbuf->texture);
      if (zsbuf->lrz && !is_z32(pfb->zsbuf->format)) {
         zsbuf->lrz_valid = true;
         zsbuf->lrz_direction = FD_LRZ_UNKNOWN;
         fd6_clear_lrz(ctx->batch, zsbuf, depth);
      }
   }

   return true;
}
530
531
void
532
fd6_draw_init(struct pipe_context *pctx) disable_thread_safety_analysis
533
{
534
struct fd_context *ctx = fd_context(pctx);
535
ctx->draw_vbo = fd6_draw_vbo;
536
ctx->clear = fd6_clear;
537
}
538
539