GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/virgl/virgl_context.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <libsync.h>
#include "pipe/p_shader_tokens.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/u_draw.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/format/u_format.h"
#include "util/u_prim.h"
#include "util/u_transfer.h"
#include "util/u_helpers.h"
#include "util/slab.h"
#include "util/u_upload_mgr.h"
#include "util/u_blitter.h"
#include "tgsi/tgsi_text.h"
#include "indices/u_primconvert.h"

#include "pipebuffer/pb_buffer.h"

#include "virgl_encode.h"
#include "virgl_context.h"
#include "virtio-gpu/virgl_protocol.h"
#include "virgl_resource.h"
#include "virgl_screen.h"
#include "virgl_staging_mgr.h"

struct virgl_vertex_elements_state {
   uint32_t handle;
   uint8_t binding_map[PIPE_MAX_ATTRIBS];
   uint8_t num_bindings;
};

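/* Object handles are shared with the host: each one names a host-side
 * object in the virgl command stream.  A single global atomic counter
 * suffices; p_atomic_inc_return() never yields 0 here, and the bind paths
 * below use handle 0 to mean "no object". */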
static uint32_t next_handle;
uint32_t virgl_object_assign_handle(void)
{
   return p_atomic_inc_return(&next_handle);
}

bool
virgl_can_rebind_resource(struct virgl_context *vctx,
                          struct pipe_resource *res)
{
   /* We cannot rebind resources that are referenced by host objects, which
    * are
    *
    *  - VIRGL_OBJECT_SURFACE
    *  - VIRGL_OBJECT_SAMPLER_VIEW
    *  - VIRGL_OBJECT_STREAMOUT_TARGET
    *
    * Because surfaces cannot be created from buffers, we require the resource
    * to be a buffer instead (and avoid tracking VIRGL_OBJECT_SURFACE binds).
    */
   const unsigned unsupported_bind = (PIPE_BIND_SAMPLER_VIEW |
                                      PIPE_BIND_STREAM_OUTPUT);
   const unsigned bind_history = virgl_resource(res)->bind_history;
   return res->target == PIPE_BUFFER && !(bind_history & unsupported_bind);
}

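/* Re-encode every binding that still references @res so the host picks up
 * the resource's new backing storage.  Vertex buffers are only marked
 * dirty here; they are re-sent on the next draw. */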
void
virgl_rebind_resource(struct virgl_context *vctx,
                      struct pipe_resource *res)
{
   /* Queries use internally created buffers and do not go through transfers.
    * Index buffers are not bindable.  They are not tracked.
    */
   ASSERTED const unsigned tracked_bind = (PIPE_BIND_VERTEX_BUFFER |
                                           PIPE_BIND_CONSTANT_BUFFER |
                                           PIPE_BIND_SHADER_BUFFER |
                                           PIPE_BIND_SHADER_IMAGE);
   const unsigned bind_history = virgl_resource(res)->bind_history;
   unsigned i;

   assert(virgl_can_rebind_resource(vctx, res) &&
          (bind_history & tracked_bind) == bind_history);

   if (bind_history & PIPE_BIND_VERTEX_BUFFER) {
      for (i = 0; i < vctx->num_vertex_buffers; i++) {
         if (vctx->vertex_buffer[i].buffer.resource == res) {
            vctx->vertex_array_dirty = true;
            break;
         }
      }
   }

   if (bind_history & PIPE_BIND_SHADER_BUFFER) {
      uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
      while (remaining_mask) {
         int i = u_bit_scan(&remaining_mask);
         if (vctx->atomic_buffers[i].buffer == res) {
            const struct pipe_shader_buffer *abo = &vctx->atomic_buffers[i];
            virgl_encode_set_hw_atomic_buffers(vctx, i, 1, abo);
         }
      }
   }

   /* check per-stage shader bindings */
   if (bind_history & (PIPE_BIND_CONSTANT_BUFFER |
                       PIPE_BIND_SHADER_BUFFER |
                       PIPE_BIND_SHADER_IMAGE)) {
      enum pipe_shader_type shader_type;
      for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
         const struct virgl_shader_binding_state *binding =
            &vctx->shader_bindings[shader_type];

         if (bind_history & PIPE_BIND_CONSTANT_BUFFER) {
            uint32_t remaining_mask = binding->ubo_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->ubos[i].buffer == res) {
                  const struct pipe_constant_buffer *ubo = &binding->ubos[i];
                  virgl_encoder_set_uniform_buffer(vctx, shader_type, i,
                                                   ubo->buffer_offset,
                                                   ubo->buffer_size,
                                                   virgl_resource(res));
               }
            }
         }

         if (bind_history & PIPE_BIND_SHADER_BUFFER) {
            uint32_t remaining_mask = binding->ssbo_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->ssbos[i].buffer == res) {
                  const struct pipe_shader_buffer *ssbo = &binding->ssbos[i];
                  virgl_encode_set_shader_buffers(vctx, shader_type, i, 1,
                                                  ssbo);
               }
            }
         }

         if (bind_history & PIPE_BIND_SHADER_IMAGE) {
            uint32_t remaining_mask = binding->image_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->images[i].resource == res) {
                  const struct pipe_image_view *image = &binding->images[i];
                  virgl_encode_set_shader_images(vctx, shader_type, i, 1,
                                                 image);
               }
            }
         }
      }
   }
}

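/* The winsys only keeps a resource alive while it is attached to the
 * current command buffer.  Each helper below walks one kind of binding and
 * re-emits the backing hw_res into the cbuf. */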
static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct pipe_surface *surf;
   struct virgl_resource *res;
   unsigned i;

   surf = vctx->framebuffer.zsbuf;
   if (surf) {
      res = virgl_resource(surf->texture);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
         virgl_resource_dirty(res, surf->u.tex.level);
      }
   }
   for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
      surf = vctx->framebuffer.cbufs[i];
      if (surf) {
         res = virgl_resource(surf->texture);
         if (res) {
            vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
            virgl_resource_dirty(res, surf->u.tex.level);
         }
      }
   }
}

static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->view_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      assert(binding->views[i] && binding->views[i]->texture);
      res = virgl_resource(binding->views[i]->texture);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_vertex_buffers; i++) {
      res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
                                          struct virgl_indexbuf *ib)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;

   res = virgl_resource(ib->buffer);
   if (res)
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}

static void virgl_attach_res_so_targets(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_so_targets; i++) {
      res = virgl_resource(vctx->so_targets[i].base.buffer);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
                                             enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ubo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ubos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
                                            enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ssbo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ssbos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_shader_images(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->image_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->images[i].resource);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(vctx->atomic_buffers[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
   }
}

/*
 * after flushing, the hw context still has a bunch of
 * resources bound, so we need to rebind those here.
 */
static void virgl_reemit_draw_resources(struct virgl_context *vctx)
{
   enum pipe_shader_type shader_type;

   /* reattach any flushed resources */
   /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
   virgl_attach_res_framebuffer(vctx);

   for (shader_type = 0; shader_type < PIPE_SHADER_COMPUTE; shader_type++) {
      virgl_attach_res_sampler_views(vctx, shader_type);
      virgl_attach_res_uniform_buffers(vctx, shader_type);
      virgl_attach_res_shader_buffers(vctx, shader_type);
      virgl_attach_res_shader_images(vctx, shader_type);
   }
   virgl_attach_res_atomic_buffers(vctx);
   virgl_attach_res_vertex_buffers(vctx);
   virgl_attach_res_so_targets(vctx);
}

static void virgl_reemit_compute_resources(struct virgl_context *vctx)
{
   virgl_attach_res_sampler_views(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_uniform_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_images(vctx, PIPE_SHADER_COMPUTE);

   virgl_attach_res_atomic_buffers(vctx);
}

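/* A pipe_surface maps to a host-side VIRGL_OBJECT_SURFACE.  Buffer
 * resources are rejected up front, which is also what keeps buffer rebinds
 * safe (see virgl_can_rebind_resource). */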
static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
                                                 struct pipe_resource *resource,
                                                 const struct pipe_surface *templ)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf;
   struct virgl_resource *res = virgl_resource(resource);
   uint32_t handle;

   /* no support for buffer surfaces */
   if (resource->target == PIPE_BUFFER)
      return NULL;

   surf = CALLOC_STRUCT(virgl_surface);
   if (!surf)
      return NULL;

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(templ->format) ==
           util_format_is_srgb(resource->format)));

   virgl_resource_dirty(res, 0);
   handle = virgl_object_assign_handle();
   pipe_reference_init(&surf->base.reference, 1);
   pipe_resource_reference(&surf->base.texture, resource);
   surf->base.context = ctx;
   surf->base.format = templ->format;

   surf->base.width = u_minify(resource->width0, templ->u.tex.level);
   surf->base.height = u_minify(resource->height0, templ->u.tex.level);
   surf->base.u.tex.level = templ->u.tex.level;
   surf->base.u.tex.first_layer = templ->u.tex.first_layer;
   surf->base.u.tex.last_layer = templ->u.tex.last_layer;
   surf->base.nr_samples = templ->nr_samples;

   virgl_encoder_create_surface(vctx, handle, res, &surf->base);
   surf->handle = handle;
   return &surf->base;
}

static void virgl_surface_destroy(struct pipe_context *ctx,
                                  struct pipe_surface *psurf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf = virgl_surface(psurf);

   pipe_resource_reference(&surf->base.texture, NULL);
   virgl_encode_delete_object(vctx, surf->handle, VIRGL_OBJECT_SURFACE);
   FREE(surf);
}

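/* CSO objects (blend/DSA/rasterizer/sampler/shader) mostly live on the
 * host: create encodes the state and returns the bare handle cast to a
 * pointer, while bind/delete just reference that handle.  Rasterizer and
 * vertex-elements state additionally keep a guest-side copy because the
 * draw path needs it (primconvert, binding_map). */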
static void *virgl_create_blend_state(struct pipe_context *ctx,
                                      const struct pipe_blend_state *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_blend_state(vctx, handle, blend_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_blend_state(struct pipe_context *ctx,
                                   void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void virgl_delete_blend_state(struct pipe_context *ctx,
                                     void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void *virgl_create_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                    const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_dsa_state(vctx, handle, dsa_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                 void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void virgl_delete_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                   void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
                                           const struct pipe_rasterizer_state *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = CALLOC_STRUCT(virgl_rasterizer_state);

   if (!vrs)
      return NULL;
   vrs->rs = *rs_state;
   vrs->handle = virgl_object_assign_handle();

   assert(rs_state->depth_clip_near ||
          virgl_screen(ctx->screen)->caps.caps.v1.bset.depth_clip_disable);

   virgl_encode_rasterizer_state(vctx, vrs->handle, rs_state);
   return (void *)vrs;
}

static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
                                        void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = 0;
   if (rs_state) {
      struct virgl_rasterizer_state *vrs = rs_state;
      vctx->rs_state = *vrs;
      handle = vrs->handle;
   }
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
}

static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
                                          void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = rs_state;
   virgl_encode_delete_object(vctx, vrs->handle, VIRGL_OBJECT_RASTERIZER);
   FREE(vrs);
}

static void virgl_set_framebuffer_state(struct pipe_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->framebuffer = *state;
   virgl_encoder_set_framebuffer_state(vctx, state);
   virgl_attach_res_framebuffer(vctx);
}

static void virgl_set_viewport_states(struct pipe_context *ctx,
                                      unsigned start_slot,
                                      unsigned num_viewports,
                                      const struct pipe_viewport_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
}

static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
                                                unsigned num_elements,
                                                const struct pipe_vertex_element *elements)
{
   struct pipe_vertex_element new_elements[PIPE_MAX_ATTRIBS];
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      CALLOC_STRUCT(virgl_vertex_elements_state);

   if (!state)
      return NULL;

   for (int i = 0; i < num_elements; ++i) {
      if (elements[i].instance_divisor) {
         /* Virglrenderer doesn't deal with instance_divisor correctly if
          * there isn't a 1:1 relationship between elements and bindings.
          * So let's make sure there is, by duplicating bindings.
          */
         for (int j = 0; j < num_elements; ++j) {
            new_elements[j] = elements[j];
            new_elements[j].vertex_buffer_index = j;
            state->binding_map[j] = elements[j].vertex_buffer_index;
         }
         elements = new_elements;
         state->num_bindings = num_elements;
         break;
      }
   }

   state->handle = virgl_object_assign_handle();
   virgl_encoder_create_vertex_elements(vctx, state->handle,
                                        num_elements, elements);
   return state;
}

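/* binding_map is consumed by virgl_hw_set_vertex_buffers(): when the loop
 * above duplicated bindings, the guest's vertex buffers are reshuffled
 * into one slot per element before being encoded for the host. */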
static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
                                               void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   virgl_encode_delete_object(vctx, state->handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
   FREE(state);
}

static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
                                             void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   vctx->vertex_elements = state;
   virgl_encode_bind_object(vctx, state ? state->handle : 0,
                            VIRGL_OBJECT_VERTEX_ELEMENTS);
   vctx->vertex_array_dirty = TRUE;
}

static void virgl_set_vertex_buffers(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_buffers,
                                     unsigned unbind_num_trailing_slots,
                                     bool take_ownership,
                                     const struct pipe_vertex_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   util_set_vertex_buffers_count(vctx->vertex_buffer,
                                 &vctx->num_vertex_buffers,
                                 buffers, start_slot, num_buffers,
                                 unbind_num_trailing_slots,
                                 take_ownership);

   if (buffers) {
      for (unsigned i = 0; i < num_buffers; i++) {
         struct virgl_resource *res =
            virgl_resource(buffers[i].buffer.resource);
         if (res && !buffers[i].is_user_buffer)
            res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
      }
   }

   vctx->vertex_array_dirty = TRUE;
}

static void virgl_hw_set_vertex_buffers(struct virgl_context *vctx)
{
   if (vctx->vertex_array_dirty) {
      struct virgl_vertex_elements_state *ve = vctx->vertex_elements;

      if (ve->num_bindings) {
         struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
         for (int i = 0; i < ve->num_bindings; ++i)
            vertex_buffers[i] = vctx->vertex_buffer[ve->binding_map[i]];

         virgl_encoder_set_vertex_buffers(vctx, ve->num_bindings, vertex_buffers);
      } else
         virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);

      virgl_attach_res_vertex_buffers(vctx);

      vctx->vertex_array_dirty = FALSE;
   }
}

static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                  const struct pipe_stencil_ref ref)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_stencil_ref(vctx, &ref);
}

static void virgl_set_blend_color(struct pipe_context *ctx,
                                  const struct pipe_blend_color *color)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_blend_color(vctx, color);
}

static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
                                      struct virgl_indexbuf *ib)
{
   virgl_encoder_set_index_buffer(vctx, ib);
   virgl_attach_res_index_buffer(vctx, ib);
}

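/* Constant buffers arrive either as real resources, which are bound as
 * UBOs on the host and tracked in ubo_enabled_mask for re-attachment, or
 * as user memory, whose contents are written inline into the command
 * stream instead. */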
static void virgl_set_constant_buffer(struct pipe_context *ctx,
                                      enum pipe_shader_type shader, uint index,
                                      bool take_ownership,
                                      const struct pipe_constant_buffer *buf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   if (buf && buf->buffer) {
      struct virgl_resource *res = virgl_resource(buf->buffer);
      res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;

      virgl_encoder_set_uniform_buffer(vctx, shader, index,
                                       buf->buffer_offset,
                                       buf->buffer_size, res);

      if (take_ownership) {
         pipe_resource_reference(&binding->ubos[index].buffer, NULL);
         binding->ubos[index].buffer = buf->buffer;
      } else {
         pipe_resource_reference(&binding->ubos[index].buffer, buf->buffer);
      }
      binding->ubos[index] = *buf;
      binding->ubo_enabled_mask |= 1 << index;
   } else {
      static const struct pipe_constant_buffer dummy_ubo;
      if (!buf)
         buf = &dummy_ubo;
      virgl_encoder_write_constant_buffer(vctx, shader, index,
                                          buf->buffer_size / 4,
                                          buf->user_buffer);

      pipe_resource_reference(&binding->ubos[index].buffer, NULL);
      binding->ubo_enabled_mask &= ~(1 << index);
   }
}

static void *virgl_shader_encoder(struct pipe_context *ctx,
                                  const struct pipe_shader_state *shader,
                                  unsigned type)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   struct tgsi_token *new_tokens;
   int ret;

   new_tokens = virgl_tgsi_transform((struct virgl_screen *)vctx->base.screen, shader->tokens);
   if (!new_tokens)
      return NULL;

   handle = virgl_object_assign_handle();
   /* encode shader state */
   ret = virgl_encode_shader_state(vctx, handle, type,
                                   &shader->stream_output, 0,
                                   new_tokens);
   if (ret) {
      /* don't leak the transformed tokens on the error path */
      FREE(new_tokens);
      return NULL;
   }

   FREE(new_tokens);
   return (void *)(unsigned long)handle;
}
static void *virgl_create_vs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
}

static void *virgl_create_tcs_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_CTRL);
}

static void *virgl_create_tes_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_EVAL);
}

static void *virgl_create_gs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_GEOMETRY);
}

static void *virgl_create_fs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_FRAGMENT);
}

static void
virgl_delete_fs_state(struct pipe_context *ctx,
                      void *fs)
{
   uint32_t handle = (unsigned long)fs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_gs_state(struct pipe_context *ctx,
                      void *gs)
{
   uint32_t handle = (unsigned long)gs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_vs_state(struct pipe_context *ctx,
                      void *vs)
{
   uint32_t handle = (unsigned long)vs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tcs_state(struct pipe_context *ctx,
                       void *tcs)
{
   uint32_t handle = (unsigned long)tcs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tes_state(struct pipe_context *ctx,
                       void *tes)
{
   uint32_t handle = (unsigned long)tes;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_bind_vs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
}

static void virgl_bind_tcs_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_CTRL);
}

static void virgl_bind_tes_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_EVAL);
}

static void virgl_bind_gs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_GEOMETRY);
}

static void virgl_bind_fs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_FRAGMENT);
}

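/* The first clear or draw after a flush re-attaches every bound resource
 * to the new command buffer (virgl_reemit_draw_resources); num_draws
 * counts work since the last flush so this happens only once per batch. */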
static void virgl_clear(struct pipe_context *ctx,
                        unsigned buffers,
                        const struct pipe_scissor_state *scissor_state,
                        const union pipe_color_union *color,
                        double depth, unsigned stencil)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_encode_clear(vctx, buffers, color, depth, stencil);
}

static void virgl_clear_texture(struct pipe_context *ctx,
                                struct pipe_resource *res,
                                unsigned int level,
                                const struct pipe_box *box,
                                const void *data)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *vres = virgl_resource(res);

   virgl_encode_clear_texture(vctx, vres, level, box, data);

   /* Mark as dirty, since we are updating the host side resource
    * without going through the corresponding guest side resource, and
    * hence the two will diverge.
    */
   virgl_resource_dirty(vres, level);
}

static void virgl_draw_vbo(struct pipe_context *ctx,
                           const struct pipe_draw_info *dinfo,
                           unsigned drawid_offset,
                           const struct pipe_draw_indirect_info *indirect,
                           const struct pipe_draw_start_count_bias *draws,
                           unsigned num_draws)
{
   if (num_draws > 1) {
      util_draw_multi(ctx, dinfo, drawid_offset, indirect, draws, num_draws);
      return;
   }

   if (!indirect && (!draws[0].count || !dinfo->instance_count))
      return;

   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_indexbuf ib = {};
   struct pipe_draw_info info = *dinfo;

   if (!indirect &&
       !dinfo->primitive_restart &&
       !u_trim_pipe_prim(dinfo->mode, (unsigned*)&draws[0].count))
      return;

   if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
      util_primconvert_save_rasterizer_state(vctx->primconvert, &vctx->rs_state.rs);
      util_primconvert_draw_vbo(vctx->primconvert, dinfo, drawid_offset, indirect, draws, num_draws);
      return;
   }
   if (info.index_size) {
      pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
      ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
      ib.index_size = dinfo->index_size;
      ib.offset = draws[0].start * ib.index_size;

      if (ib.user_buffer) {
         unsigned start_offset = draws[0].start * ib.index_size;
         u_upload_data(vctx->uploader, start_offset,
                       draws[0].count * ib.index_size, 4,
                       (char*)ib.user_buffer + start_offset,
                       &ib.offset, &ib.buffer);
         ib.offset -= start_offset;
         ib.user_buffer = NULL;
      }
   }

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_hw_set_vertex_buffers(vctx);
   if (info.index_size)
      virgl_hw_set_index_buffer(vctx, &ib);

   virgl_encoder_draw_vbo(vctx, &info, drawid_offset, indirect, &draws[0]);

   pipe_resource_reference(&ib.buffer, NULL);
}

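/* With VIRGL_DEBUG_SYNC set, every submission becomes synchronous: a
 * private fence is created and waited on before returning, which helps
 * pin down GPU hangs at the cost of all pipelining. */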
static void virgl_submit_cmd(struct virgl_winsys *vws,
                             struct virgl_cmd_buf *cbuf,
                             struct pipe_fence_handle **fence)
{
   if (unlikely(virgl_debug & VIRGL_DEBUG_SYNC)) {
      struct pipe_fence_handle *sync_fence = NULL;

      vws->submit_cmd(vws, cbuf, &sync_fence);

      vws->fence_wait(vws, sync_fence, PIPE_TIMEOUT_INFINITE);
      vws->fence_reference(vws, &sync_fence, NULL);
   } else {
      vws->submit_cmd(vws, cbuf, fence);
   }
}

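/* Central flush path: skip command buffers that are genuinely empty (only
 * the reserved transfer space written, no queued transfers, no fence
 * requested), move queued transfers into the cbuf, submit, then re-reserve
 * transfer space and re-select our hw sub-context for the next batch. */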
static void virgl_flush_eq(struct virgl_context *ctx, void *closure,
                           struct pipe_fence_handle **fence)
{
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);

   /* skip empty cbuf */
   if (ctx->cbuf->cdw == ctx->cbuf_initial_cdw &&
       ctx->queue.num_dwords == 0 &&
       !fence)
      return;

   if (ctx->num_draws)
      u_upload_unmap(ctx->uploader);

   /* send the buffer to the remote side for decoding */
   ctx->num_draws = ctx->num_compute = 0;

   virgl_transfer_queue_clear(&ctx->queue, ctx->cbuf);

   virgl_submit_cmd(rs->vws, ctx->cbuf, fence);

   /* Reserve some space for transfers. */
   if (ctx->encoded_transfers)
      ctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);

   ctx->cbuf_initial_cdw = ctx->cbuf->cdw;

   /* We have flushed the command queue, including any pending copy transfers
    * involving staging resources.
    */
   ctx->queued_staging_res_size = 0;
}

static void virgl_flush_from_st(struct pipe_context *ctx,
                                struct pipe_fence_handle **fence,
                                enum pipe_flush_flags flags)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_flush_eq(vctx, vctx, fence);
}

static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
                                                           struct pipe_resource *texture,
                                                           const struct pipe_sampler_view *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview;
   uint32_t handle;
   struct virgl_resource *res;

   if (!state)
      return NULL;

   grview = CALLOC_STRUCT(virgl_sampler_view);
   if (!grview)
      return NULL;

   res = virgl_resource(texture);
   handle = virgl_object_assign_handle();
   virgl_encode_sampler_view(vctx, handle, res, state);

   grview->base = *state;
   grview->base.reference.count = 1;

   grview->base.texture = NULL;
   grview->base.context = ctx;
   pipe_resource_reference(&grview->base.texture, texture);
   grview->handle = handle;
   return &grview->base;
}

static void virgl_set_sampler_views(struct pipe_context *ctx,
                                    enum pipe_shader_type shader_type,
                                    unsigned start_slot,
                                    unsigned num_views,
                                    unsigned unbind_num_trailing_slots,
                                    struct pipe_sampler_view **views)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   binding->view_enabled_mask &= ~u_bit_consecutive(start_slot, num_views);
   for (unsigned i = 0; i < num_views; i++) {
      unsigned idx = start_slot + i;
      if (views && views[i]) {
         struct virgl_resource *res = virgl_resource(views[i]->texture);
         res->bind_history |= PIPE_BIND_SAMPLER_VIEW;

         pipe_sampler_view_reference(&binding->views[idx], views[i]);
         binding->view_enabled_mask |= 1 << idx;
      } else {
         pipe_sampler_view_reference(&binding->views[idx], NULL);
      }
   }

   virgl_encode_set_sampler_views(vctx, shader_type,
         start_slot, num_views, (struct virgl_sampler_view **)binding->views);
   virgl_attach_res_sampler_views(vctx, shader_type);

   if (unbind_num_trailing_slots) {
      virgl_set_sampler_views(ctx, shader_type, start_slot + num_views,
                              unbind_num_trailing_slots, 0, NULL);
   }
}

static void
virgl_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_BARRIER) &&
       !(rs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_BLEND_EQUATION))
      return;
   virgl_encode_texture_barrier(vctx, flags);
}

static void virgl_destroy_sampler_view(struct pipe_context *ctx,
                                       struct pipe_sampler_view *view)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview = virgl_sampler_view(view);

   virgl_encode_delete_object(vctx, grview->handle, VIRGL_OBJECT_SAMPLER_VIEW);
   pipe_resource_reference(&view->texture, NULL);
   FREE(view);
}

static void *virgl_create_sampler_state(struct pipe_context *ctx,
                                        const struct pipe_sampler_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;

   handle = virgl_object_assign_handle();

   virgl_encode_sampler_state(vctx, handle, state);
   return (void *)(unsigned long)handle;
}

static void virgl_delete_sampler_state(struct pipe_context *ctx,
                                       void *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)ss;

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SAMPLER_STATE);
}

static void virgl_bind_sampler_states(struct pipe_context *ctx,
                                      enum pipe_shader_type shader,
                                      unsigned start_slot,
                                      unsigned num_samplers,
                                      void **samplers)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handles[32];
   int i;
   for (i = 0; i < num_samplers; i++) {
      handles[i] = (unsigned long)(samplers[i]);
   }
   virgl_encode_bind_sampler_states(vctx, shader, start_slot, num_samplers, handles);
}

static void virgl_set_polygon_stipple(struct pipe_context *ctx,
                                      const struct pipe_poly_stipple *ps)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_polygon_stipple(vctx, ps);
}

static void virgl_set_scissor_states(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_scissor,
                                     const struct pipe_scissor_state *ss)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_scissor_state(vctx, start_slot, num_scissor, ss);
}

static void virgl_set_sample_mask(struct pipe_context *ctx,
                                  unsigned sample_mask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_sample_mask(vctx, sample_mask);
}

static void virgl_set_min_samples(struct pipe_context *ctx,
                                  unsigned min_samples)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_SET_MIN_SAMPLES))
      return;
   virgl_encoder_set_min_samples(vctx, min_samples);
}

static void virgl_set_clip_state(struct pipe_context *ctx,
                                 const struct pipe_clip_state *clip)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_clip_state(vctx, clip);
}

static void virgl_set_tess_state(struct pipe_context *ctx,
                                 const float default_outer_level[4],
                                 const float default_inner_level[2])
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!rs->caps.caps.v1.bset.has_tessellation_shaders)
      return;
   virgl_encode_set_tess_state(vctx, default_outer_level, default_inner_level);
}

static void virgl_resource_copy_region(struct pipe_context *ctx,
                                       struct pipe_resource *dst,
                                       unsigned dst_level,
                                       unsigned dstx, unsigned dsty, unsigned dstz,
                                       struct pipe_resource *src,
                                       unsigned src_level,
                                       const struct pipe_box *src_box)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(dst);
   struct virgl_resource *sres = virgl_resource(src);

   if (dres->b.target == PIPE_BUFFER)
      util_range_add(&dres->b, &dres->valid_buffer_range, dstx, dstx + src_box->width);
   virgl_resource_dirty(dres, dst_level);

   virgl_encode_resource_copy_region(vctx, dres,
                                     dst_level, dstx, dsty, dstz,
                                     sres, src_level,
                                     src_box);
}

static void
virgl_flush_resource(struct pipe_context *pipe,
                     struct pipe_resource *resource)
{
}

static void virgl_blit(struct pipe_context *ctx,
                       const struct pipe_blit_info *blit)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(blit->dst.resource);
   struct virgl_resource *sres = virgl_resource(blit->src.resource);

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(blit->dst.resource->format) ==
           util_format_is_srgb(blit->dst.format)));

   virgl_resource_dirty(dres, blit->dst.level);
   virgl_encode_blit(vctx, dres, sres,
                     blit);
}

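/* The set_* entry points below share one pattern: update the guest-side
 * shadow copy and its enabled mask (so bindings can be re-attached after a
 * flush), record bind_history on the resource (so rebinds know where to
 * look), then encode the new state for the host. */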
static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
                                        unsigned start_slot,
                                        unsigned count,
                                        const struct pipe_shader_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->atomic_buffer_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (buffers && buffers[i].buffer) {
         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
         res->bind_history |= PIPE_BIND_SHADER_BUFFER;

         pipe_resource_reference(&vctx->atomic_buffers[idx].buffer,
                                 buffers[i].buffer);
         vctx->atomic_buffers[idx] = buffers[i];
         vctx->atomic_buffer_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&vctx->atomic_buffers[idx].buffer, NULL);
      }
   }

   virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
}

static void virgl_set_shader_buffers(struct pipe_context *ctx,
                                     enum pipe_shader_type shader,
                                     unsigned start_slot, unsigned count,
                                     const struct pipe_shader_buffer *buffers,
                                     unsigned writable_bitmask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   binding->ssbo_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (buffers && buffers[i].buffer) {
         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
         res->bind_history |= PIPE_BIND_SHADER_BUFFER;

         pipe_resource_reference(&binding->ssbos[idx].buffer, buffers[i].buffer);
         binding->ssbos[idx] = buffers[i];
         binding->ssbo_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&binding->ssbos[idx].buffer, NULL);
      }
   }

   uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_buffer_frag_compute :
      rs->caps.caps.v2.max_shader_buffer_other_stages;
   if (!max_shader_buffer)
      return;
   virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
}

static void virgl_create_fence_fd(struct pipe_context *ctx,
                                  struct pipe_fence_handle **fence,
                                  int fd,
                                  enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->cs_create_fence)
      *fence = rs->vws->cs_create_fence(rs->vws, fd);
}

static void virgl_fence_server_sync(struct pipe_context *ctx,
                                    struct pipe_fence_handle *fence)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->fence_server_sync)
      rs->vws->fence_server_sync(rs->vws, vctx->cbuf, fence);
}

static void virgl_set_shader_images(struct pipe_context *ctx,
                                    enum pipe_shader_type shader,
                                    unsigned start_slot, unsigned count,
                                    unsigned unbind_num_trailing_slots,
                                    const struct pipe_image_view *images)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   binding->image_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (images && images[i].resource) {
         struct virgl_resource *res = virgl_resource(images[i].resource);
         res->bind_history |= PIPE_BIND_SHADER_IMAGE;

         pipe_resource_reference(&binding->images[idx].resource,
                                 images[i].resource);
         binding->images[idx] = images[i];
         binding->image_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&binding->images[idx].resource, NULL);
      }
   }

   uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_image_frag_compute :
      rs->caps.caps.v2.max_shader_image_other_stages;
   if (!max_shader_images)
      return;
   virgl_encode_set_shader_images(vctx, shader, start_slot, count, images);

   if (unbind_num_trailing_slots) {
      virgl_set_shader_images(ctx, shader, start_slot + count,
                              unbind_num_trailing_slots, 0, NULL);
   }
}

static void virgl_memory_barrier(struct pipe_context *ctx,
                                 unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_MEMORY_BARRIER))
      return;
   virgl_encode_memory_barrier(vctx, flags);
}

static void *virgl_create_compute_state(struct pipe_context *ctx,
                                        const struct pipe_compute_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   const struct tgsi_token *new_tokens = state->prog;
   struct pipe_stream_output_info so_info = {};
   int ret;

   handle = virgl_object_assign_handle();
   ret = virgl_encode_shader_state(vctx, handle, PIPE_SHADER_COMPUTE,
                                   &so_info,
                                   state->req_local_mem,
                                   new_tokens);
   if (ret) {
      return NULL;
   }

   return (void *)(unsigned long)handle;
}

static void virgl_bind_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_COMPUTE);
}

static void virgl_delete_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_launch_grid(struct pipe_context *ctx,
                              const struct pipe_grid_info *info)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_compute)
      virgl_reemit_compute_resources(vctx);
   vctx->num_compute++;

   virgl_encode_launch_grid(vctx, info);
}

static void
virgl_release_shader_binding(struct virgl_context *vctx,
                             enum pipe_shader_type shader_type)
{
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   while (binding->view_enabled_mask) {
      int i = u_bit_scan(&binding->view_enabled_mask);
      pipe_sampler_view_reference(
            (struct pipe_sampler_view **)&binding->views[i], NULL);
   }

   while (binding->ubo_enabled_mask) {
      int i = u_bit_scan(&binding->ubo_enabled_mask);
      pipe_resource_reference(&binding->ubos[i].buffer, NULL);
   }

   while (binding->ssbo_enabled_mask) {
      int i = u_bit_scan(&binding->ssbo_enabled_mask);
      pipe_resource_reference(&binding->ssbos[i].buffer, NULL);
   }

   while (binding->image_enabled_mask) {
      int i = u_bit_scan(&binding->image_enabled_mask);
      pipe_resource_reference(&binding->images[i].resource, NULL);
   }
}

static void
virgl_emit_string_marker(struct pipe_context *ctx, const char *message, int len)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encode_emit_string_marker(vctx, message, len);
}

static void
virgl_context_destroy(struct pipe_context *ctx)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   enum pipe_shader_type shader_type;

   vctx->framebuffer.zsbuf = NULL;
   vctx->framebuffer.nr_cbufs = 0;
   virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
   virgl_flush_eq(vctx, vctx, NULL);

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++)
      virgl_release_shader_binding(vctx, shader_type);

   while (vctx->atomic_buffer_enabled_mask) {
      int i = u_bit_scan(&vctx->atomic_buffer_enabled_mask);
      pipe_resource_reference(&vctx->atomic_buffers[i].buffer, NULL);
   }

   rs->vws->cmd_buf_destroy(vctx->cbuf);
   if (vctx->uploader)
      u_upload_destroy(vctx->uploader);
   if (vctx->supports_staging)
      virgl_staging_destroy(&vctx->staging);
   util_primconvert_destroy(vctx->primconvert);
   virgl_transfer_queue_fini(&vctx->queue);

   slab_destroy_child(&vctx->transfer_pool);
   FREE(vctx);
}

static void virgl_get_sample_position(struct pipe_context *ctx,
                                      unsigned sample_count,
                                      unsigned index,
                                      float *out_value)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);

   if (sample_count > vs->caps.caps.v1.max_samples) {
      debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
                   sample_count, vs->caps.caps.v1.max_samples);
      return;
   }

   /* The following is basically copied from the i965 driver's
    * gen6_get_sample_position().  The only addition is that we hold the
    * msaa positions for all sample counts in a flat array. */
   uint32_t bits = 0;
   if (sample_count == 1) {
      out_value[0] = out_value[1] = 0.5f;
      return;
   } else if (sample_count == 2) {
      bits = vs->caps.caps.v2.sample_locations[0] >> (8 * index);
   } else if (sample_count <= 4) {
      bits = vs->caps.caps.v2.sample_locations[1] >> (8 * index);
   } else if (sample_count <= 8) {
      bits = vs->caps.caps.v2.sample_locations[2 + (index >> 2)] >> (8 * (index & 3));
   } else if (sample_count <= 16) {
      bits = vs->caps.caps.v2.sample_locations[4 + (index >> 2)] >> (8 * (index & 3));
   }
   out_value[0] = ((bits >> 4) & 0xf) / 16.0f;
   out_value[1] = (bits & 0xf) / 16.0f;

   if (virgl_debug & VIRGL_DEBUG_VERBOSE)
      debug_printf("VIRGL: sample position [%2d/%2d] = (%f, %f)\n",
                   index, sample_count, out_value[0], out_value[1]);
}

static void virgl_send_tweaks(struct virgl_context *vctx, struct virgl_screen *rs)
{
   if (rs->tweak_gles_emulate_bgra)
      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_emulate, 1);

   if (rs->tweak_gles_apply_bgra_dest_swizzle)
      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_apply_dest_swizzle, 1);

   if (rs->tweak_gles_tf3_value > 0)
      virgl_encode_tweak(vctx, virgl_tweak_gles_tf3_samples_passes_multiplier,
                         rs->tweak_gles_tf3_value);
}

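/* Context creation: each context gets its own command buffer, transfer
 * queue and hw sub-context on the host renderer; the rest of this function
 * wires up the pipe_context vtable to the implementations above. */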
struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
                                          void *priv,
                                          unsigned flags)
{
   struct virgl_context *vctx;
   struct virgl_screen *rs = virgl_screen(pscreen);
   vctx = CALLOC_STRUCT(virgl_context);
   const char *host_debug_flagstring;

   if (!vctx)
      return NULL;

   vctx->cbuf = rs->vws->cmd_buf_create(rs->vws, VIRGL_MAX_CMDBUF_DWORDS);
   if (!vctx->cbuf) {
      FREE(vctx);
      return NULL;
   }

   vctx->base.destroy = virgl_context_destroy;
   vctx->base.create_surface = virgl_create_surface;
   vctx->base.surface_destroy = virgl_surface_destroy;
   vctx->base.set_framebuffer_state = virgl_set_framebuffer_state;
   vctx->base.create_blend_state = virgl_create_blend_state;
   vctx->base.bind_blend_state = virgl_bind_blend_state;
   vctx->base.delete_blend_state = virgl_delete_blend_state;
   vctx->base.create_depth_stencil_alpha_state = virgl_create_depth_stencil_alpha_state;
   vctx->base.bind_depth_stencil_alpha_state = virgl_bind_depth_stencil_alpha_state;
   vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
   vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
   vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
   vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;

   vctx->base.set_viewport_states = virgl_set_viewport_states;
   vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
   vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
   vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
   vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
   vctx->base.set_constant_buffer = virgl_set_constant_buffer;

   vctx->base.set_tess_state = virgl_set_tess_state;
   vctx->base.create_vs_state = virgl_create_vs_state;
   vctx->base.create_tcs_state = virgl_create_tcs_state;
   vctx->base.create_tes_state = virgl_create_tes_state;
   vctx->base.create_gs_state = virgl_create_gs_state;
   vctx->base.create_fs_state = virgl_create_fs_state;

   vctx->base.bind_vs_state = virgl_bind_vs_state;
   vctx->base.bind_tcs_state = virgl_bind_tcs_state;
   vctx->base.bind_tes_state = virgl_bind_tes_state;
   vctx->base.bind_gs_state = virgl_bind_gs_state;
   vctx->base.bind_fs_state = virgl_bind_fs_state;

   vctx->base.delete_vs_state = virgl_delete_vs_state;
   vctx->base.delete_tcs_state = virgl_delete_tcs_state;
   vctx->base.delete_tes_state = virgl_delete_tes_state;
   vctx->base.delete_gs_state = virgl_delete_gs_state;
   vctx->base.delete_fs_state = virgl_delete_fs_state;

   vctx->base.create_compute_state = virgl_create_compute_state;
   vctx->base.bind_compute_state = virgl_bind_compute_state;
   vctx->base.delete_compute_state = virgl_delete_compute_state;
   vctx->base.launch_grid = virgl_launch_grid;

   vctx->base.clear = virgl_clear;
   vctx->base.clear_texture = virgl_clear_texture;
   vctx->base.draw_vbo = virgl_draw_vbo;
   vctx->base.flush = virgl_flush_from_st;
   vctx->base.screen = pscreen;
   vctx->base.create_sampler_view = virgl_create_sampler_view;
   vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
   vctx->base.set_sampler_views = virgl_set_sampler_views;
   vctx->base.texture_barrier = virgl_texture_barrier;

   vctx->base.create_sampler_state = virgl_create_sampler_state;
   vctx->base.delete_sampler_state = virgl_delete_sampler_state;
   vctx->base.bind_sampler_states = virgl_bind_sampler_states;

   vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
   vctx->base.set_scissor_states = virgl_set_scissor_states;
   vctx->base.set_sample_mask = virgl_set_sample_mask;
   vctx->base.set_min_samples = virgl_set_min_samples;
   vctx->base.set_stencil_ref = virgl_set_stencil_ref;
   vctx->base.set_clip_state = virgl_set_clip_state;

   vctx->base.set_blend_color = virgl_set_blend_color;

   vctx->base.get_sample_position = virgl_get_sample_position;

   vctx->base.resource_copy_region = virgl_resource_copy_region;
   vctx->base.flush_resource = virgl_flush_resource;
   vctx->base.blit = virgl_blit;
   vctx->base.create_fence_fd = virgl_create_fence_fd;
   vctx->base.fence_server_sync = virgl_fence_server_sync;

   vctx->base.set_shader_buffers = virgl_set_shader_buffers;
   vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
   vctx->base.set_shader_images = virgl_set_shader_images;
   vctx->base.memory_barrier = virgl_memory_barrier;
   vctx->base.emit_string_marker = virgl_emit_string_marker;

   virgl_init_context_resource_functions(&vctx->base);
   virgl_init_query_functions(vctx);
   virgl_init_so_functions(vctx);

   slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);
   virgl_transfer_queue_init(&vctx->queue, vctx);
   vctx->encoded_transfers = (rs->vws->supports_encoded_transfers &&
                              (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER));

   /* Reserve some space for transfers. */
   if (vctx->encoded_transfers)
      vctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
   vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024,
                                    PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM, 0);
   if (!vctx->uploader)
      goto fail;
   vctx->base.stream_uploader = vctx->uploader;
   vctx->base.const_uploader = vctx->uploader;

   /* We use a special staging buffer as the source of copy transfers. */
   if ((rs->caps.caps.v2.capability_bits & VIRGL_CAP_COPY_TRANSFER) &&
       vctx->encoded_transfers) {
      virgl_staging_init(&vctx->staging, &vctx->base, 1024 * 1024);
      vctx->supports_staging = true;
   }

   vctx->hw_sub_ctx_id = p_atomic_inc_return(&rs->sub_ctx_id);
   virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_GUEST_MAY_INIT_LOG) {
      host_debug_flagstring = getenv("VIRGL_HOST_DEBUG");
      if (host_debug_flagstring)
         virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
   }

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT)
      virgl_send_tweaks(vctx, rs);

   return &vctx->base;
fail:
   virgl_context_destroy(&vctx->base);
   return NULL;
}