/* GitHub repository: PojavLauncherTeam/mesa
 * Path: blob/21.2-virgl/src/gallium/drivers/panfrost/pan_context.c */
/*
 * Copyright (C) 2019-2020 Collabora, Ltd.
 * © Copyright 2018 Alyssa Rosenzweig
 * Copyright © 2014-2017 Broadcom
 * Copyright (C) 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <sys/poll.h>
#include <errno.h>

#include "pan_bo.h"
#include "pan_context.h"
#include "pan_minmax_cache.h"
#include "panfrost-quirks.h"

#include "util/macros.h"
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"
#include "util/u_memory.h"
#include "util/u_vbuf.h"
#include "util/half_float.h"
#include "util/u_helpers.h"
#include "util/u_prim.h"
#include "util/u_prim_restart.h"
#include "indices/u_primconvert.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_from_mesa.h"
#include "util/u_math.h"

#include "pan_screen.h"
#include "pan_util.h"
#include "decode.h"
#include "util/pan_lower_framebuffer.h"

static void
panfrost_clear(
        struct pipe_context *pipe,
        unsigned buffers,
        const struct pipe_scissor_state *scissor_state,
        const union pipe_color_union *color,
        double depth, unsigned stencil)
{
        struct panfrost_context *ctx = pan_context(pipe);

        if (!panfrost_render_condition_check(ctx))
                return;

        /* TODO: panfrost_get_fresh_batch_for_fbo() instantiates a new batch if
         * the existing batch targeting this FBO has draws. We could probably
         * avoid that by replacing plain clears by quad-draws with a specific
         * color/depth/stencil value, thus avoiding the generation of extra
         * fragment jobs.
         */
        struct panfrost_batch *batch = panfrost_get_fresh_batch_for_fbo(ctx);
        panfrost_batch_clear(batch, buffers, color, depth, stencil);
}

bool
panfrost_writes_point_size(struct panfrost_context *ctx)
{
        assert(ctx->shader[PIPE_SHADER_VERTEX]);
        struct panfrost_shader_state *vs = panfrost_get_shader_state(ctx, PIPE_SHADER_VERTEX);

        return vs->info.vs.writes_point_size && ctx->active_prim == PIPE_PRIM_POINTS;
}

/* The entire frame is in memory -- send it off to the kernel! */

void
panfrost_flush(
        struct pipe_context *pipe,
        struct pipe_fence_handle **fence,
        unsigned flags)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_device *dev = pan_device(pipe->screen);

        /* Submit all pending jobs */
        panfrost_flush_all_batches(ctx);

        if (fence) {
                struct pipe_fence_handle *f = panfrost_fence_create(ctx);
                pipe->screen->fence_reference(pipe->screen, fence, NULL);
                *fence = f;
        }

        if (dev->debug & PAN_DBG_TRACE)
                pandecode_next_frame();
}

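/* Texture barrier: flush every pending batch so that later texture sampling
 * observes the results of prior rendering. */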
static void
panfrost_texture_barrier(struct pipe_context *pipe, unsigned flags)
{
        struct panfrost_context *ctx = pan_context(pipe);
        panfrost_flush_all_batches(ctx);
}

static void
panfrost_set_frontend_noop(struct pipe_context *pipe, bool enable)
{
        struct panfrost_context *ctx = pan_context(pipe);
        panfrost_flush_all_batches(ctx);
        ctx->is_noop = enable;
}

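/* Generic destructor for CSOs that own no GPU resources; the bound object is
 * a plain heap allocation that can simply be freed. */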
static void
panfrost_generic_cso_delete(struct pipe_context *pctx, void *hwcso)
{
        free(hwcso);
}

static void
panfrost_bind_blend_state(struct pipe_context *pipe, void *cso)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->blend = cso;
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
}

static void
panfrost_set_blend_color(struct pipe_context *pipe,
                         const struct pipe_blend_color *blend_color)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;

        if (blend_color)
                ctx->blend_color = *blend_color;
}

/* Create a final blend given the context */

mali_ptr
panfrost_get_blend(struct panfrost_batch *batch, unsigned rti, struct panfrost_bo **bo, unsigned *shader_offset)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_blend_state *blend = ctx->blend;
        struct pan_blend_info info = blend->info[rti];
        struct pipe_surface *surf = batch->key.cbufs[rti];
        enum pipe_format fmt = surf->format;

        /* Use fixed-function if the equation permits, the format is blendable,
         * and no more than one unique constant is accessed */
        if (info.fixed_function && panfrost_blendable_formats_v7[fmt].internal &&
            pan_blend_is_homogenous_constant(info.constant_mask,
                                             ctx->blend_color.color)) {
                return 0;
        }

        /* Otherwise, we need to grab a shader */
        struct pan_blend_state pan_blend = blend->pan;
        unsigned nr_samples = surf->nr_samples ? : surf->texture->nr_samples;

        pan_blend.rts[rti].format = fmt;
        pan_blend.rts[rti].nr_samples = nr_samples;
        memcpy(pan_blend.constants, ctx->blend_color.color,
               sizeof(pan_blend.constants));

        /* Upload the shader, sharing a BO */
        if (!(*bo)) {
                *bo = panfrost_batch_create_bo(batch, 4096, PAN_BO_EXECUTE,
                                               PIPE_SHADER_FRAGMENT, "Blend shader");
        }

        struct panfrost_shader_state *ss = panfrost_get_shader_state(ctx, PIPE_SHADER_FRAGMENT);

        /* Default for Midgard */
        nir_alu_type col0_type = nir_type_float32;
        nir_alu_type col1_type = nir_type_float32;

        /* Bifrost has per-output types, respect them */
        if (pan_is_bifrost(dev)) {
                col0_type = ss->info.bifrost.blend[rti].type;
                col1_type = ss->info.bifrost.blend_src1_type;
        }

        pthread_mutex_lock(&dev->blend_shaders.lock);
        struct pan_blend_shader_variant *shader =
                pan_blend_get_shader_locked(dev, &pan_blend,
                                            col0_type, col1_type, rti);

        /* Size check and upload */
        unsigned offset = *shader_offset;
        assert((offset + shader->binary.size) < 4096);
        memcpy((*bo)->ptr.cpu + offset, shader->binary.data, shader->binary.size);
        *shader_offset += shader->binary.size;
        pthread_mutex_unlock(&dev->blend_shaders.lock);

        return ((*bo)->ptr.gpu + offset) | shader->first_tag;
}

static void
panfrost_bind_rasterizer_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->rasterizer = hwcso;

        /* We can assume the renderer state descriptor is always dirty, the
         * dependencies are too intricate to bother tracking in detail. However
         * we could probably diff the renderers for viewport dirty tracking,
         * that just cares about the scissor enable and the depth clips. */
        ctx->dirty |= PAN_DIRTY_SCISSOR;
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
}

static void
panfrost_set_shader_images(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned count, unsigned unbind_num_trailing_slots,
        const struct pipe_image_view *iviews)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_IMAGE;

        /* Unbind start_slot...start_slot+count+unbind_num_trailing_slots */
        if (!iviews) {
                for (int i = start_slot; i < start_slot + count + unbind_num_trailing_slots; i++) {
                        pipe_resource_reference(&ctx->images[shader][i].resource, NULL);
                }

                ctx->image_mask[shader] &= ~(((1ull << count) - 1) << start_slot);
                return;
        }

        /* Bind start_slot...start_slot+count */
        for (int i = 0; i < count; i++) {
                const struct pipe_image_view *image = &iviews[i];
                SET_BIT(ctx->image_mask[shader], 1 << (start_slot + i), image->resource);

                if (!image->resource) {
                        util_copy_image_view(&ctx->images[shader][start_slot+i], NULL);
                        continue;
                }

                struct panfrost_resource *rsrc = pan_resource(image->resource);

                /* Images don't work with AFBC, since they require pixel-level granularity */
                if (drm_is_afbc(rsrc->image.layout.modifier)) {
                        pan_resource_modifier_convert(ctx, rsrc,
                                        DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED);
                }

                util_copy_image_view(&ctx->images[shader][start_slot+i], image);
        }

        /* Unbind start_slot+count...start_slot+count+unbind_num_trailing_slots */
        for (int i = 0; i < unbind_num_trailing_slots; i++) {
                SET_BIT(ctx->image_mask[shader], 1 << (start_slot + count + i), NULL);
                util_copy_image_view(&ctx->images[shader][start_slot+count+i], NULL);
        }
}

static void
panfrost_bind_vertex_elements_state(
        struct pipe_context *pctx,
        void *hwcso)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->vertex = hwcso;
}

static void *
panfrost_create_shader_state(
        struct pipe_context *pctx,
        const struct pipe_shader_state *cso,
        enum pipe_shader_type stage)
{
        struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
        struct panfrost_device *dev = pan_device(pctx->screen);
        so->base = *cso;

        /* Token deep copy to prevent memory corruption */

        if (cso->type == PIPE_SHADER_IR_TGSI)
                so->base.tokens = tgsi_dup_tokens(so->base.tokens);

        /* Precompile for shader-db if we need to */
        if (unlikely((dev->debug & PAN_DBG_PRECOMPILE) && cso->type == PIPE_SHADER_IR_NIR)) {
                struct panfrost_context *ctx = pan_context(pctx);

                struct panfrost_shader_state state = { 0 };

                panfrost_shader_compile(pctx->screen,
                                        &ctx->shaders, &ctx->descs,
                                        PIPE_SHADER_IR_NIR,
                                        so->base.ir.nir,
                                        tgsi_processor_to_shader_stage(stage),
                                        &state);
        }

        return so;
}

static void
panfrost_delete_shader_state(
        struct pipe_context *pctx,
        void *so)
{
        struct panfrost_shader_variants *cso = (struct panfrost_shader_variants *) so;

        if (cso->base.type == PIPE_SHADER_IR_TGSI) {
                /* TODO: leaks TGSI tokens! */
        }

        for (unsigned i = 0; i < cso->variant_count; ++i) {
                struct panfrost_shader_state *shader_state = &cso->variants[i];
                panfrost_bo_unreference(shader_state->bin.bo);
                panfrost_bo_unreference(shader_state->state.bo);
                panfrost_bo_unreference(shader_state->linkage.bo);
        }

        free(cso->variants);
        free(so);
}

static void
panfrost_bind_sampler_states(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned num_sampler,
        void **sampler)
{
        assert(start_slot == 0);

        struct panfrost_context *ctx = pan_context(pctx);
        ctx->dirty_shader[shader] |= PAN_DIRTY_STAGE_SAMPLER;

        ctx->sampler_count[shader] = sampler ? num_sampler : 0;
        if (sampler)
                memcpy(ctx->samplers[shader], sampler, num_sampler * sizeof (void *));
}

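/* Check whether an existing shader variant is compatible with the current
 * context state. Fragment shaders that read their outputs (framebuffer
 * fetch) are keyed to the bound render-target formats, so those formats and
 * the colour buffer count must match. */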
static bool
panfrost_variant_matches(
        struct panfrost_context *ctx,
        struct panfrost_shader_state *variant,
        enum pipe_shader_type type)
{
        struct panfrost_device *dev = pan_device(ctx->base.screen);

        if (variant->info.stage == MESA_SHADER_FRAGMENT &&
            variant->info.fs.outputs_read) {
                struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;

                unsigned i;
                BITSET_FOREACH_SET(i, &variant->info.fs.outputs_read, 8) {
                        enum pipe_format fmt = PIPE_FORMAT_R8G8B8A8_UNORM;

                        if ((fb->nr_cbufs > i) && fb->cbufs[i])
                                fmt = fb->cbufs[i]->format;

                        const struct util_format_description *desc =
                                util_format_description(fmt);

                        if (pan_format_class_load(desc, dev->quirks) == PAN_FORMAT_NATIVE)
                                fmt = PIPE_FORMAT_NONE;

                        if (variant->rt_formats[i] != fmt)
                                return false;
                }
        }

        if (variant->info.stage == MESA_SHADER_FRAGMENT &&
            variant->nr_cbufs != ctx->pipe_framebuffer.nr_cbufs)
                return false;

        /* Otherwise, we're good to go */
        return true;
}

/**
 * Fix an uncompiled shader's stream output info, and produce a bitmask
 * of which VARYING_SLOT_* are captured for stream output.
 *
 * Core Gallium stores output->register_index as a "slot" number, where
 * slots are assigned consecutively to all outputs in info->outputs_written.
 * This naive packing of outputs doesn't work for us - we too have slots,
 * but the layout is defined by the VUE map, which we won't have until we
 * compile a specific shader variant. So, we remap these and simply store
 * VARYING_SLOT_* in our copy's output->register_index fields.
 *
 * We then produce a bitmask of outputs which are used for SO.
 *
 * Implementation from iris.
 */

static uint64_t
update_so_info(struct pipe_stream_output_info *so_info,
               uint64_t outputs_written)
{
        uint64_t so_outputs = 0;
        uint8_t reverse_map[64] = {0};
        unsigned slot = 0;

        while (outputs_written)
                reverse_map[slot++] = u_bit_scan64(&outputs_written);

        for (unsigned i = 0; i < so_info->num_outputs; i++) {
                struct pipe_stream_output *output = &so_info->output[i];

                /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
                output->register_index = reverse_map[output->register_index];

                so_outputs |= 1ull << output->register_index;
        }

        return so_outputs;
}

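/* Bind a shader CSO, selecting a variant that matches the current state. If
 * no compatible variant exists, a new one is allocated (growing the variant
 * array as needed) and compiled lazily before use. */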
static void
panfrost_bind_shader_state(
        struct pipe_context *pctx,
        void *hwcso,
        enum pipe_shader_type type)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        ctx->shader[type] = hwcso;

        ctx->dirty |= PAN_DIRTY_TLS_SIZE;
        ctx->dirty_shader[type] |= PAN_DIRTY_STAGE_RENDERER;

        if (!hwcso) return;

        /* Match the appropriate variant */

        signed variant = -1;
        struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;

        for (unsigned i = 0; i < variants->variant_count; ++i) {
                if (panfrost_variant_matches(ctx, &variants->variants[i], type)) {
                        variant = i;
                        break;
                }
        }

        if (variant == -1) {
                /* No variant matched, so create a new one */
                variant = variants->variant_count++;

                if (variants->variant_count > variants->variant_space) {
                        unsigned old_space = variants->variant_space;

                        variants->variant_space *= 2;
                        if (variants->variant_space == 0)
                                variants->variant_space = 1;

                        /* Arbitrary limit to stop runaway programs from
                         * creating an unbounded number of shader variants. */
                        assert(variants->variant_space < 1024);

                        unsigned msize = sizeof(struct panfrost_shader_state);
                        variants->variants = realloc(variants->variants,
                                                     variants->variant_space * msize);

                        memset(&variants->variants[old_space], 0,
                               (variants->variant_space - old_space) * msize);
                }

                struct panfrost_shader_state *v =
                        &variants->variants[variant];

                if (type == PIPE_SHADER_FRAGMENT) {
                        struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
                        v->nr_cbufs = fb->nr_cbufs;

                        for (unsigned i = 0; i < fb->nr_cbufs; ++i) {
                                enum pipe_format fmt = PIPE_FORMAT_R8G8B8A8_UNORM;

                                if ((fb->nr_cbufs > i) && fb->cbufs[i])
                                        fmt = fb->cbufs[i]->format;

                                const struct util_format_description *desc =
                                        util_format_description(fmt);

                                if (pan_format_class_load(desc, dev->quirks) == PAN_FORMAT_NATIVE)
                                        fmt = PIPE_FORMAT_NONE;

                                v->rt_formats[i] = fmt;
                        }
                }
        }

        /* Select this variant */
        variants->active_variant = variant;

        struct panfrost_shader_state *shader_state = &variants->variants[variant];
        assert(panfrost_variant_matches(ctx, shader_state, type));

        /* We finally have a variant, so compile it */

        if (!shader_state->compiled) {
                panfrost_shader_compile(ctx->base.screen,
                                        &ctx->shaders, &ctx->descs,
                                        variants->base.type,
                                        variants->base.type == PIPE_SHADER_IR_NIR ?
                                                variants->base.ir.nir :
                                                variants->base.tokens,
                                        tgsi_processor_to_shader_stage(type),
                                        shader_state);

                shader_state->compiled = true;

                /* Fixup the stream out information */
                shader_state->stream_output = variants->base.stream_output;
                shader_state->so_mask =
                        update_so_info(&shader_state->stream_output,
                                       shader_state->info.outputs_written);
        }
}

static void *
panfrost_create_vs_state(struct pipe_context *pctx, const struct pipe_shader_state *hwcso)
{
        return panfrost_create_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
}

static void *
panfrost_create_fs_state(struct pipe_context *pctx, const struct pipe_shader_state *hwcso)
{
        return panfrost_create_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
}

static void
panfrost_bind_vs_state(struct pipe_context *pctx, void *hwcso)
{
        panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
}

static void
panfrost_bind_fs_state(struct pipe_context *pctx, void *hwcso)
{
        panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
}

static void
panfrost_set_vertex_buffers(
        struct pipe_context *pctx,
        unsigned start_slot,
        unsigned num_buffers,
        unsigned unbind_num_trailing_slots,
        bool take_ownership,
        const struct pipe_vertex_buffer *buffers)
{
        struct panfrost_context *ctx = pan_context(pctx);

        util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers,
                                     start_slot, num_buffers, unbind_num_trailing_slots,
                                     take_ownership);
}

static void
panfrost_set_constant_buffer(
        struct pipe_context *pctx,
        enum pipe_shader_type shader, uint index, bool take_ownership,
        const struct pipe_constant_buffer *buf)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];

        util_copy_constant_buffer(&pbuf->cb[index], buf, take_ownership);

        unsigned mask = (1 << index);

        if (unlikely(!buf)) {
                pbuf->enabled_mask &= ~mask;
                return;
        }

        pbuf->enabled_mask |= mask;
        ctx->dirty_shader[shader] |= PAN_DIRTY_STAGE_CONST;
}

static void
panfrost_set_stencil_ref(
        struct pipe_context *pctx,
        const struct pipe_stencil_ref ref)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->stencil_ref = ref;
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
}

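/* Bind sampler views, taking references on the new views and dropping
 * references on any trailing views that are no longer bound. */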
static void
panfrost_set_sampler_views(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start_slot, unsigned num_views,
        unsigned unbind_num_trailing_slots,
        struct pipe_sampler_view **views)
{
        struct panfrost_context *ctx = pan_context(pctx);
        ctx->dirty_shader[shader] |= PAN_DIRTY_STAGE_TEXTURE;

        unsigned new_nr = 0;
        unsigned i;

        assert(start_slot == 0);

        if (!views)
                num_views = 0;

        for (i = 0; i < num_views; ++i) {
                if (views[i])
                        new_nr = i + 1;
                pipe_sampler_view_reference((struct pipe_sampler_view **)&ctx->sampler_views[shader][i],
                                            views[i]);
        }

        for (; i < ctx->sampler_view_count[shader]; i++) {
                pipe_sampler_view_reference((struct pipe_sampler_view **)&ctx->sampler_views[shader][i],
                                            NULL);
        }
        ctx->sampler_view_count[shader] = new_nr;
}

static void
panfrost_sampler_view_destroy(
        struct pipe_context *pctx,
        struct pipe_sampler_view *pview)
{
        struct panfrost_sampler_view *view = (struct panfrost_sampler_view *) pview;

        pipe_resource_reference(&pview->texture, NULL);
        panfrost_bo_unreference(view->state.bo);
        ralloc_free(view);
}

static void
panfrost_set_shader_buffers(
        struct pipe_context *pctx,
        enum pipe_shader_type shader,
        unsigned start, unsigned count,
        const struct pipe_shader_buffer *buffers,
        unsigned writable_bitmask)
{
        struct panfrost_context *ctx = pan_context(pctx);

        util_set_shader_buffers_mask(ctx->ssbo[shader], &ctx->ssbo_mask[shader],
                                     buffers, start, count);
}

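/* Binding a new framebuffer invalidates the active batch and the cached
 * render-target mask, and may re-key the fragment shader when it reads its
 * outputs (framebuffer fetch). */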
static void
panfrost_set_framebuffer_state(struct pipe_context *pctx,
                               const struct pipe_framebuffer_state *fb)
{
        struct panfrost_context *ctx = pan_context(pctx);

        util_copy_framebuffer_state(&ctx->pipe_framebuffer, fb);
        ctx->batch = NULL;

        /* Hot draw call path needs the mask of active render targets */
        ctx->fb_rt_mask = 0;

        for (unsigned i = 0; i < ctx->pipe_framebuffer.nr_cbufs; ++i) {
                if (ctx->pipe_framebuffer.cbufs[i])
                        ctx->fb_rt_mask |= BITFIELD_BIT(i);
        }

        /* We may need to generate a new variant if the fragment shader is
         * keyed to the framebuffer format (due to EXT_framebuffer_fetch) */
        struct panfrost_shader_variants *fs = ctx->shader[PIPE_SHADER_FRAGMENT];

        if (fs && fs->variant_count &&
            fs->variants[fs->active_variant].info.fs.outputs_read)
                ctx->base.bind_fs_state(&ctx->base, fs);
}

static void
panfrost_bind_depth_stencil_state(struct pipe_context *pipe,
                                  void *cso)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->depth_stencil = cso;
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
}

static void
panfrost_set_sample_mask(struct pipe_context *pipe,
                         unsigned sample_mask)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->sample_mask = sample_mask;
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
}

static void
panfrost_set_min_samples(struct pipe_context *pipe,
                         unsigned min_samples)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->min_samples = min_samples;
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
}

static void
panfrost_set_clip_state(struct pipe_context *pipe,
                        const struct pipe_clip_state *clip)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}

static void
panfrost_set_viewport_states(struct pipe_context *pipe,
                             unsigned start_slot,
                             unsigned num_viewports,
                             const struct pipe_viewport_state *viewports)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_viewports == 1);

        ctx->pipe_viewport = *viewports;
        ctx->dirty |= PAN_DIRTY_VIEWPORT;
}

static void
panfrost_set_scissor_states(struct pipe_context *pipe,
                            unsigned start_slot,
                            unsigned num_scissors,
                            const struct pipe_scissor_state *scissors)
{
        struct panfrost_context *ctx = pan_context(pipe);

        assert(start_slot == 0);
        assert(num_scissors == 1);

        ctx->scissor = *scissors;
        ctx->dirty |= PAN_DIRTY_SCISSOR;
}

static void
panfrost_set_polygon_stipple(struct pipe_context *pipe,
                             const struct pipe_poly_stipple *stipple)
{
        //struct panfrost_context *panfrost = pan_context(pipe);
}

static void
panfrost_set_active_query_state(struct pipe_context *pipe,
                                bool enable)
{
        struct panfrost_context *ctx = pan_context(pipe);
        ctx->active_queries = enable;
        ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
}

static void
panfrost_render_condition(struct pipe_context *pipe,
                          struct pipe_query *query,
                          bool condition,
                          enum pipe_render_cond_flag mode)
{
        struct panfrost_context *ctx = pan_context(pipe);

        ctx->cond_query = (struct panfrost_query *)query;
        ctx->cond_cond = condition;
        ctx->cond_mode = mode;
}

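/* Tear down the context: the blitter, framebuffer references, the stream
 * uploader and the descriptor/shader pools. */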
static void
panfrost_destroy(struct pipe_context *pipe)
{
        struct panfrost_context *panfrost = pan_context(pipe);

        if (panfrost->blitter)
                util_blitter_destroy(panfrost->blitter);

        util_unreference_framebuffer_state(&panfrost->pipe_framebuffer);
        u_upload_destroy(pipe->stream_uploader);

        panfrost_pool_cleanup(&panfrost->descs);
        panfrost_pool_cleanup(&panfrost->shaders);

        ralloc_free(pipe);
}

static struct pipe_query *
panfrost_create_query(struct pipe_context *pipe,
                      unsigned type,
                      unsigned index)
{
        struct panfrost_query *q = rzalloc(pipe, struct panfrost_query);

        q->type = type;
        q->index = index;

        return (struct pipe_query *) q;
}

static void
panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_query *query = (struct panfrost_query *) q;

        if (query->rsrc)
                pipe_resource_reference(&query->rsrc, NULL);

        ralloc_free(q);
}

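/* Occlusion queries are backed by a buffer holding one counter per core,
 * zero-initialised at begin time; primitive counts are tracked in software
 * by the draw path. */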
static bool
panfrost_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_query *query = (struct panfrost_query *) q;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
                unsigned size = sizeof(uint64_t) * dev->core_count;

                /* Allocate a resource for the query results to be stored */
                if (!query->rsrc) {
                        query->rsrc = pipe_buffer_create(ctx->base.screen,
                                        PIPE_BIND_QUERY_BUFFER, 0, size);
                }

                /* Default to 0 if nothing at all drawn. */
                uint8_t *zeroes = alloca(size);
                memset(zeroes, 0, size);
                pipe_buffer_write(pipe, query->rsrc, 0, size, zeroes);

                query->msaa = (ctx->pipe_framebuffer.samples > 1);
                ctx->occlusion_query = query;
                ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
                break;
        }

        /* Geometry statistics are computed in the driver. XXX: geom/tess
         * shaders.. */

        case PIPE_QUERY_PRIMITIVES_GENERATED:
                query->start = ctx->prims_generated;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                query->start = ctx->tf_prims_generated;
                break;

        default:
                /* TODO: timestamp queries, etc? */
                break;
        }

        return true;
}

static bool
panfrost_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_query *query = (struct panfrost_query *) q;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
                ctx->occlusion_query = NULL;
                ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= PAN_DIRTY_STAGE_RENDERER;
                break;
        case PIPE_QUERY_PRIMITIVES_GENERATED:
                query->end = ctx->prims_generated;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                query->end = ctx->tf_prims_generated;
                break;
        }

        return true;
}

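/* Read back query results. Occlusion queries flush the writing batch, wait
 * on the result BO and sum the per-core counters; primitive queries are
 * answered from the software counters after a flush. */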
static bool
panfrost_get_query_result(struct pipe_context *pipe,
                          struct pipe_query *q,
                          bool wait,
                          union pipe_query_result *vresult)
{
        struct panfrost_query *query = (struct panfrost_query *) q;
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_device *dev = pan_device(ctx->base.screen);
        struct panfrost_resource *rsrc = pan_resource(query->rsrc);

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
        case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
                panfrost_flush_writer(ctx, rsrc);
                panfrost_bo_wait(rsrc->image.data.bo, INT64_MAX, false);

                /* Read back the query results */
                uint64_t *result = (uint64_t *) rsrc->image.data.bo->ptr.cpu;

                if (query->type == PIPE_QUERY_OCCLUSION_COUNTER) {
                        uint64_t passed = 0;
                        for (int i = 0; i < dev->core_count; ++i)
                                passed += result[i];

                        if (!pan_is_bifrost(dev) && !query->msaa)
                                passed /= 4;

                        vresult->u64 = passed;
                } else {
                        vresult->b = !!result[0];
                }

                break;

        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                panfrost_flush_all_batches(ctx);
                vresult->u64 = query->end - query->start;
                break;

        default:
                /* TODO: more queries */
                break;
        }

        return true;
}

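/* Evaluate the conditional-rendering predicate. Returns true when rendering
 * should proceed: either no predicate query is bound, or its result does not
 * match the skip condition. */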
bool
panfrost_render_condition_check(struct panfrost_context *ctx)
{
        if (!ctx->cond_query)
                return true;

        union pipe_query_result res = { 0 };
        bool wait =
                ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
                ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

        struct pipe_query *pq = (struct pipe_query *)ctx->cond_query;

        if (panfrost_get_query_result(&ctx->base, pq, wait, &res))
                return res.u64 != ctx->cond_cond;

        return true;
}

static struct pipe_stream_output_target *
panfrost_create_stream_output_target(struct pipe_context *pctx,
                                     struct pipe_resource *prsc,
                                     unsigned buffer_offset,
                                     unsigned buffer_size)
{
        struct pipe_stream_output_target *target;

        target = &rzalloc(pctx, struct panfrost_streamout_target)->base;

        if (!target)
                return NULL;

        pipe_reference_init(&target->reference, 1);
        pipe_resource_reference(&target->buffer, prsc);

        target->context = pctx;
        target->buffer_offset = buffer_offset;
        target->buffer_size = buffer_size;

        return target;
}

static void
panfrost_stream_output_target_destroy(struct pipe_context *pctx,
                                      struct pipe_stream_output_target *target)
{
        pipe_resource_reference(&target->buffer, NULL);
        ralloc_free(target);
}

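/* Bind transform feedback targets, updating buffer offsets where requested
 * and releasing references to previously bound targets that are no longer
 * in use. */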
static void
panfrost_set_stream_output_targets(struct pipe_context *pctx,
                                   unsigned num_targets,
                                   struct pipe_stream_output_target **targets,
                                   const unsigned *offsets)
{
        struct panfrost_context *ctx = pan_context(pctx);
        struct panfrost_streamout *so = &ctx->streamout;

        assert(num_targets <= ARRAY_SIZE(so->targets));

        for (unsigned i = 0; i < num_targets; i++) {
                if (offsets[i] != -1)
                        pan_so_target(targets[i])->offset = offsets[i];

                pipe_so_target_reference(&so->targets[i], targets[i]);
        }

        /* Only drop references to stale targets beyond the new binding, so
         * the targets bound above are left intact */
        for (unsigned i = num_targets; i < so->num_targets; i++)
                pipe_so_target_reference(&so->targets[i], NULL);

        so->num_targets = num_targets;
}

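/* Create the Gallium context: wire up the entry points, create the upload
 * manager and descriptor/shader pools, and initialise the syncobj used to
 * track job completion. */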
struct pipe_context *
panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
{
        struct panfrost_context *ctx = rzalloc(screen, struct panfrost_context);
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_device *dev = pan_device(screen);

        gallium->screen = screen;

        gallium->destroy = panfrost_destroy;

        gallium->set_framebuffer_state = panfrost_set_framebuffer_state;

        gallium->flush = panfrost_flush;
        gallium->clear = panfrost_clear;
        gallium->texture_barrier = panfrost_texture_barrier;
        gallium->set_frontend_noop = panfrost_set_frontend_noop;

        gallium->set_vertex_buffers = panfrost_set_vertex_buffers;
        gallium->set_constant_buffer = panfrost_set_constant_buffer;
        gallium->set_shader_buffers = panfrost_set_shader_buffers;
        gallium->set_shader_images = panfrost_set_shader_images;

        gallium->set_stencil_ref = panfrost_set_stencil_ref;

        gallium->set_sampler_views = panfrost_set_sampler_views;
        gallium->sampler_view_destroy = panfrost_sampler_view_destroy;

        gallium->bind_rasterizer_state = panfrost_bind_rasterizer_state;
        gallium->delete_rasterizer_state = panfrost_generic_cso_delete;

        gallium->bind_vertex_elements_state = panfrost_bind_vertex_elements_state;
        gallium->delete_vertex_elements_state = panfrost_generic_cso_delete;

        gallium->create_fs_state = panfrost_create_fs_state;
        gallium->delete_fs_state = panfrost_delete_shader_state;
        gallium->bind_fs_state = panfrost_bind_fs_state;

        gallium->create_vs_state = panfrost_create_vs_state;
        gallium->delete_vs_state = panfrost_delete_shader_state;
        gallium->bind_vs_state = panfrost_bind_vs_state;

        gallium->delete_sampler_state = panfrost_generic_cso_delete;
        gallium->bind_sampler_states = panfrost_bind_sampler_states;

        gallium->bind_depth_stencil_alpha_state = panfrost_bind_depth_stencil_state;
        gallium->delete_depth_stencil_alpha_state = panfrost_generic_cso_delete;

        gallium->set_sample_mask = panfrost_set_sample_mask;
        gallium->set_min_samples = panfrost_set_min_samples;

        gallium->set_clip_state = panfrost_set_clip_state;
        gallium->set_viewport_states = panfrost_set_viewport_states;
        gallium->set_scissor_states = panfrost_set_scissor_states;
        gallium->set_polygon_stipple = panfrost_set_polygon_stipple;
        gallium->set_active_query_state = panfrost_set_active_query_state;
        gallium->render_condition = panfrost_render_condition;

        gallium->create_query = panfrost_create_query;
        gallium->destroy_query = panfrost_destroy_query;
        gallium->begin_query = panfrost_begin_query;
        gallium->end_query = panfrost_end_query;
        gallium->get_query_result = panfrost_get_query_result;

        gallium->create_stream_output_target = panfrost_create_stream_output_target;
        gallium->stream_output_target_destroy = panfrost_stream_output_target_destroy;
        gallium->set_stream_output_targets = panfrost_set_stream_output_targets;

        gallium->bind_blend_state = panfrost_bind_blend_state;
        gallium->delete_blend_state = panfrost_generic_cso_delete;

        gallium->set_blend_color = panfrost_set_blend_color;

        panfrost_cmdstream_context_init(gallium);
        panfrost_resource_context_init(gallium);
        panfrost_compute_context_init(gallium);

        gallium->stream_uploader = u_upload_create_default(gallium);
        gallium->const_uploader = gallium->stream_uploader;

        panfrost_pool_init(&ctx->descs, ctx, dev,
                           0, 4096, "Descriptors", true, false);

        panfrost_pool_init(&ctx->shaders, ctx, dev,
                           PAN_BO_EXECUTE, 4096, "Shaders", true, false);

        /* All of our GPUs support ES mode. Midgard supports additionally
         * QUADS/QUAD_STRIPS/POLYGON. Bifrost supports just QUADS. */

        ctx->draw_modes = (1 << (PIPE_PRIM_QUADS + 1)) - 1;

        if (!pan_is_bifrost(dev)) {
                ctx->draw_modes |= (1 << PIPE_PRIM_QUAD_STRIP);
                ctx->draw_modes |= (1 << PIPE_PRIM_POLYGON);
        }

        ctx->primconvert = util_primconvert_create(gallium, ctx->draw_modes);

        ctx->blitter = util_blitter_create(gallium);

        assert(ctx->blitter);

        /* Prepare for render! */

        /* By default mask everything on */
        ctx->sample_mask = ~0;
        ctx->active_queries = true;

        int ASSERTED ret;

        /* Create a syncobj in a signaled state. Will be updated to point to the
         * last queued job out_sync every time we submit a new job.
         */
        ret = drmSyncobjCreate(dev->fd, DRM_SYNCOBJ_CREATE_SIGNALED, &ctx->syncobj);
        assert(!ret && ctx->syncobj);

        return gallium;
}