GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/winsys/svga/drm/vmw_context.c
/**********************************************************
 * Copyright 2009-2015 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "svga_cmd.h"

#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_debug_stack.h"
#include "util/u_debug_flush.h"
#include "util/u_hash_table.h"
#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_validate.h"

#include "svga_winsys.h"
#include "vmw_context.h"
#include "vmw_screen.h"
#include "vmw_buffer.h"
#include "vmw_surface.h"
#include "vmw_fence.h"
#include "vmw_shader.h"
#include "vmw_query.h"

#define VMW_COMMAND_SIZE (64*1024)
#define VMW_SURFACE_RELOCS (1024)
#define VMW_SHADER_RELOCS (1024)
#define VMW_REGION_RELOCS (512)

#define VMW_MUST_FLUSH_STACK 8

/*
 * A factor applied to the maximum MOB memory size to determine
 * the optimal time to preemptively flush the command buffer.
 * The constant is based on some performance trials with SpecViewperf.
 */
#define VMW_MAX_MOB_MEM_FACTOR 2

/*
 * A factor applied to the maximum surface memory size to determine
 * the optimal time to preemptively flush the command buffer.
 * The constant is based on some performance trials with SpecViewperf.
 */
#define VMW_MAX_SURF_MEM_FACTOR 2

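/*
 * A pending patch of a guest pointer or MOB id in the command buffer.
 * Relocations are staged while a command is being built and are applied
 * in vmw_swc_flush() once the backing buffers have been validated and
 * their final placement is known.
 */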
struct vmw_buffer_relocation
{
   struct pb_buffer *buffer;
   boolean is_mob;
   uint32 offset;

   union {
      struct {
         struct SVGAGuestPtr *where;
      } region;
      struct {
         SVGAMobId *id;
         uint32 *offset_into_mob;
      } mob;
   };
};

struct vmw_ctx_validate_item {
   union {
      struct vmw_svga_winsys_surface *vsurf;
      struct vmw_svga_winsys_shader *vshader;
   };
   boolean referenced;
};

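/*
 * Per-context winsys state: the command buffer plus three relocation
 * pools (surfaces, regions/MOBs, shaders). Each pool tracks how many
 * entries are committed (used), staged by the command currently being
 * built (staged), and reserved for it (reserved).
 */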
struct vmw_svga_winsys_context
{
   struct svga_winsys_context base;

   struct vmw_winsys_screen *vws;
   struct hash_table *hash;

#ifdef DEBUG
   boolean must_flush;
   struct debug_stack_frame must_flush_stack[VMW_MUST_FLUSH_STACK];
   struct debug_flush_ctx *fctx;
#endif

   struct {
      uint8_t buffer[VMW_COMMAND_SIZE];
      uint32_t size;
      uint32_t used;
      uint32_t reserved;
   } command;

   struct {
      struct vmw_ctx_validate_item items[VMW_SURFACE_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } surface;

   struct {
      struct vmw_buffer_relocation relocs[VMW_REGION_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } region;

   struct {
      struct vmw_ctx_validate_item items[VMW_SHADER_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } shader;

   struct pb_validate *validate;

   /**
    * The amount of surface, GMR or MOB memory that is referred to by the
    * commands currently batched in the context command buffer.
    */
   uint64_t seen_surfaces;
   uint64_t seen_regions;
   uint64_t seen_mobs;

   /**
    * Whether this context should fail to reserve more commands, not because
    * it ran out of command space, but because a substantial amount of GMR
    * memory was referenced.
    */
   boolean preemptive_flush;
};


static inline struct vmw_svga_winsys_context *
vmw_svga_winsys_context(struct svga_winsys_context *swc)
{
   assert(swc);
   return (struct vmw_svga_winsys_context *)swc;
}


static inline enum pb_usage_flags
vmw_translate_to_pb_flags(unsigned flags)
{
   enum pb_usage_flags f = 0;
   if (flags & SVGA_RELOC_READ)
      f |= PB_USAGE_GPU_READ;

   if (flags & SVGA_RELOC_WRITE)
      f |= PB_USAGE_GPU_WRITE;

   return f;
}
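
/*
 * vmw_swc_flush - Validate all buffers referenced by the batch, apply the
 * staged relocations, submit the batched commands to the kernel and reset
 * the per-batch state. An optional fence for the submission is returned
 * in @pfence.
 */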
static enum pipe_error
vmw_swc_flush(struct svga_winsys_context *swc,
              struct pipe_fence_handle **pfence)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_winsys_screen *vws = vswc->vws;
   struct pipe_fence_handle *fence = NULL;
   unsigned i;
   enum pipe_error ret;

   /*
    * If we hit a retry, lock the mutex and retry immediately.
    * If we then still hit a retry, sleep until another thread
    * wakes us up after it has released its buffers from the
    * validate list.
    *
    * If we hit another error condition, we still need to broadcast since
    * pb_validate_validate releases validated buffers in its error path.
    */

   ret = pb_validate_validate(vswc->validate);
   if (ret != PIPE_OK) {
      mtx_lock(&vws->cs_mutex);
      while (ret == PIPE_ERROR_RETRY) {
         ret = pb_validate_validate(vswc->validate);
         if (ret == PIPE_ERROR_RETRY) {
            cnd_wait(&vws->cs_cond, &vws->cs_mutex);
         }
      }
      if (ret != PIPE_OK) {
         cnd_broadcast(&vws->cs_cond);
      }
      mtx_unlock(&vws->cs_mutex);
   }

   assert(ret == PIPE_OK);
   if (ret == PIPE_OK) {

      /* Apply relocations */
      for (i = 0; i < vswc->region.used; ++i) {
         struct vmw_buffer_relocation *reloc = &vswc->region.relocs[i];
         struct SVGAGuestPtr ptr;

         if (!vmw_gmr_bufmgr_region_ptr(reloc->buffer, &ptr))
            assert(0);

         ptr.offset += reloc->offset;

         if (reloc->is_mob) {
            if (reloc->mob.id)
               *reloc->mob.id = ptr.gmrId;
            if (reloc->mob.offset_into_mob)
               *reloc->mob.offset_into_mob = ptr.offset;
            else {
               assert(ptr.offset == 0);
            }
         } else
            *reloc->region.where = ptr;
      }

      if (vswc->command.used || pfence != NULL)
         vmw_ioctl_command(vws,
                           vswc->base.cid,
                           0,
                           vswc->command.buffer,
                           vswc->command.used,
                           &fence,
                           vswc->base.imported_fence_fd,
                           vswc->base.hints);

      pb_validate_fence(vswc->validate, fence);
      mtx_lock(&vws->cs_mutex);
      cnd_broadcast(&vws->cs_cond);
      mtx_unlock(&vws->cs_mutex);
   }

   vswc->command.used = 0;
   vswc->command.reserved = 0;

   for (i = 0; i < vswc->surface.used + vswc->surface.staged; ++i) {
      struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
      if (isurf->referenced)
         p_atomic_dec(&isurf->vsurf->validated);
      vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
   }

   _mesa_hash_table_clear(vswc->hash, NULL);
   vswc->surface.used = 0;
   vswc->surface.reserved = 0;

   for (i = 0; i < vswc->shader.used + vswc->shader.staged; ++i) {
      struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
      if (ishader->referenced)
         p_atomic_dec(&ishader->vshader->validated);
      vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
   }

   vswc->shader.used = 0;
   vswc->shader.reserved = 0;

   vswc->region.used = 0;
   vswc->region.reserved = 0;

#ifdef DEBUG
   vswc->must_flush = FALSE;
   debug_flush_flush(vswc->fctx);
#endif
   swc->hints &= ~SVGA_HINT_FLAG_CAN_PRE_FLUSH;
   swc->hints &= ~SVGA_HINT_FLAG_EXPORT_FENCE_FD;
   vswc->preemptive_flush = FALSE;
   vswc->seen_surfaces = 0;
   vswc->seen_regions = 0;
   vswc->seen_mobs = 0;

   if (vswc->base.imported_fence_fd != -1) {
      close(vswc->base.imported_fence_fd);
      vswc->base.imported_fence_fd = -1;
   }

   if (pfence)
      vmw_fence_reference(vswc->vws, pfence, fence);

   vmw_fence_reference(vswc->vws, &fence, NULL);

   return ret;
}


static void *
vmw_swc_reserve(struct svga_winsys_context *swc,
                uint32_t nr_bytes, uint32_t nr_relocs)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

#ifdef DEBUG
   /* Check if somebody forgot to check the previous failure */
   if (vswc->must_flush) {
      debug_printf("Forgot to flush:\n");
      debug_backtrace_dump(vswc->must_flush_stack, VMW_MUST_FLUSH_STACK);
      assert(!vswc->must_flush);
   }
   debug_flush_might_flush(vswc->fctx);
#endif

   assert(nr_bytes <= vswc->command.size);
   if (nr_bytes > vswc->command.size)
      return NULL;

   if (vswc->preemptive_flush ||
       vswc->command.used + nr_bytes > vswc->command.size ||
       vswc->surface.used + nr_relocs > vswc->surface.size ||
       vswc->shader.used + nr_relocs > vswc->shader.size ||
       vswc->region.used + nr_relocs > vswc->region.size) {
#ifdef DEBUG
      vswc->must_flush = TRUE;
      debug_backtrace_capture(vswc->must_flush_stack, 1,
                              VMW_MUST_FLUSH_STACK);
#endif
      return NULL;
   }

   assert(vswc->command.used + nr_bytes <= vswc->command.size);
   assert(vswc->surface.used + nr_relocs <= vswc->surface.size);
   assert(vswc->shader.used + nr_relocs <= vswc->shader.size);
   assert(vswc->region.used + nr_relocs <= vswc->region.size);

   vswc->command.reserved = nr_bytes;
   vswc->surface.reserved = nr_relocs;
   vswc->surface.staged = 0;
   vswc->shader.reserved = nr_relocs;
   vswc->shader.staged = 0;
   vswc->region.reserved = nr_relocs;
   vswc->region.staged = 0;

   return vswc->command.buffer + vswc->command.used;
}
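
/*
 * Reservation follows a reserve/stage/commit protocol. A hypothetical
 * caller-side sketch (names illustrative, error handling elided):
 *
 *    cmd = swc->reserve(swc, nr_bytes, nr_relocs);
 *    if (!cmd) {
 *       swc->flush(swc, NULL);            // out of space; flush ...
 *       cmd = swc->reserve(swc, nr_bytes, nr_relocs);  // ... and retry
 *    }
 *    // fill in *cmd; emit relocations into the reserved slots
 *    swc->commit(swc);
 */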

static unsigned
vmw_swc_get_command_buffer_size(struct svga_winsys_context *swc)
{
   const struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   return vswc->command.used;
}

static void
vmw_swc_context_relocation(struct svga_winsys_context *swc,
                           uint32 *cid)
{
   *cid = swc->cid;
}
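
/*
 * vmw_swc_add_validate_buffer - Add a buffer to the context's validation
 * list. Returns TRUE if the buffer was not already on the list, so the
 * caller can count its size toward the seen_* totals exactly once.
 */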
static boolean
vmw_swc_add_validate_buffer(struct vmw_svga_winsys_context *vswc,
                            struct pb_buffer *pb_buf,
                            unsigned flags)
{
   ASSERTED enum pipe_error ret;
   unsigned translated_flags;
   boolean already_present;

   translated_flags = vmw_translate_to_pb_flags(flags);
   ret = pb_validate_add_buffer(vswc->validate, pb_buf, translated_flags,
                                vswc->hash, &already_present);
   assert(ret == PIPE_OK);
   return !already_present;
}
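
/*
 * The relocation functions below also account the size of each newly
 * validated buffer. Once a total exceeds a fixed fraction of the
 * available GMR, MOB or surface memory, preemptive_flush is set and the
 * next reserve fails, prompting the caller to flush early.
 */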

static void
vmw_swc_region_relocation(struct svga_winsys_context *swc,
                          struct SVGAGuestPtr *where,
                          struct svga_winsys_buffer *buffer,
                          uint32 offset,
                          unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_buffer_relocation *reloc;

   assert(vswc->region.staged < vswc->region.reserved);

   reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
   reloc->region.where = where;

   /*
    * pb_validate holds a refcount to the buffer, so no need to
    * refcount it again in the relocation.
    */
   reloc->buffer = vmw_pb_buffer(buffer);
   reloc->offset = offset;
   reloc->is_mob = FALSE;
   ++vswc->region.staged;

   if (vmw_swc_add_validate_buffer(vswc, reloc->buffer, flags)) {
      vswc->seen_regions += reloc->buffer->size;
      if ((swc->hints & SVGA_HINT_FLAG_CAN_PRE_FLUSH) &&
          vswc->seen_regions >= VMW_GMR_POOL_SIZE/5)
         vswc->preemptive_flush = TRUE;
   }

#ifdef DEBUG
   if (!(flags & SVGA_RELOC_INTERNAL))
      debug_flush_cb_reference(vswc->fctx, vmw_debug_flush_buf(buffer));
#endif
}

static void
vmw_swc_mob_relocation(struct svga_winsys_context *swc,
                       SVGAMobId *id,
                       uint32 *offset_into_mob,
                       struct svga_winsys_buffer *buffer,
                       uint32 offset,
                       unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_buffer_relocation *reloc;
   struct pb_buffer *pb_buffer = vmw_pb_buffer(buffer);

   if (id) {
      assert(vswc->region.staged < vswc->region.reserved);

      reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
      reloc->mob.id = id;
      reloc->mob.offset_into_mob = offset_into_mob;

      /*
       * pb_validate holds a refcount to the buffer, so no need to
       * refcount it again in the relocation.
       */
      reloc->buffer = pb_buffer;
      reloc->offset = offset;
      reloc->is_mob = TRUE;
      ++vswc->region.staged;
   }

   if (vmw_swc_add_validate_buffer(vswc, pb_buffer, flags)) {
      vswc->seen_mobs += pb_buffer->size;

      if ((swc->hints & SVGA_HINT_FLAG_CAN_PRE_FLUSH) &&
          vswc->seen_mobs >=
            vswc->vws->ioctl.max_mob_memory / VMW_MAX_MOB_MEM_FACTOR)
         vswc->preemptive_flush = TRUE;
   }

#ifdef DEBUG
   if (!(flags & SVGA_RELOC_INTERNAL))
      debug_flush_cb_reference(vswc->fctx, vmw_debug_flush_buf(buffer));
#endif
}


/**
 * vmw_swc_surface_clear_reference - Clear referenced info for a surface
 *
 * @swc: Pointer to an svga_winsys_context
 * @vsurf: Pointer to a vmw_svga_winsys_surface, the referenced info of which
 * we want to clear
 *
 * This is primarily used by a discard surface map to indicate that the
 * surface data is no longer referenced by a draw call, and mapping it
 * should therefore no longer cause a flush.
 */
void
vmw_swc_surface_clear_reference(struct svga_winsys_context *swc,
                                struct vmw_svga_winsys_surface *vsurf)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_ctx_validate_item *isrf =
      util_hash_table_get(vswc->hash, vsurf);

   if (isrf && isrf->referenced) {
      isrf->referenced = FALSE;
      p_atomic_dec(&vsurf->validated);
   }
}

static void
vmw_swc_surface_only_relocation(struct svga_winsys_context *swc,
                                uint32 *where,
                                struct vmw_svga_winsys_surface *vsurf,
                                unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_ctx_validate_item *isrf;

   assert(vswc->surface.staged < vswc->surface.reserved);
   isrf = util_hash_table_get(vswc->hash, vsurf);

   if (isrf == NULL) {
      isrf = &vswc->surface.items[vswc->surface.used + vswc->surface.staged];
      vmw_svga_winsys_surface_reference(&isrf->vsurf, vsurf);
      isrf->referenced = FALSE;

      _mesa_hash_table_insert(vswc->hash, vsurf, isrf);
      ++vswc->surface.staged;

      vswc->seen_surfaces += vsurf->size;
      if ((swc->hints & SVGA_HINT_FLAG_CAN_PRE_FLUSH) &&
          vswc->seen_surfaces >=
            vswc->vws->ioctl.max_surface_memory / VMW_MAX_SURF_MEM_FACTOR)
         vswc->preemptive_flush = TRUE;
   }

   if (!(flags & SVGA_RELOC_INTERNAL) && !isrf->referenced) {
      isrf->referenced = TRUE;
      p_atomic_inc(&vsurf->validated);
   }

   if (where)
      *where = vsurf->sid;
}

static void
vmw_swc_surface_relocation(struct svga_winsys_context *swc,
                           uint32 *where,
                           uint32 *mobid,
                           struct svga_winsys_surface *surface,
                           unsigned flags)
{
   struct vmw_svga_winsys_surface *vsurf;

   assert(swc->have_gb_objects || mobid == NULL);

   if (!surface) {
      *where = SVGA3D_INVALID_ID;
      if (mobid)
         *mobid = SVGA3D_INVALID_ID;
      return;
   }

   vsurf = vmw_svga_winsys_surface(surface);
   vmw_swc_surface_only_relocation(swc, where, vsurf, flags);

   if (swc->have_gb_objects && vsurf->buf != NULL) {

      /*
       * Make sure backup buffer ends up fenced.
       */

      mtx_lock(&vsurf->mutex);
      assert(vsurf->buf != NULL);

      /*
       * An internal reloc means that the surface transfer direction
       * is opposite to the MOB transfer direction...
       */
      if ((flags & SVGA_RELOC_INTERNAL) &&
          (flags & (SVGA_RELOC_READ | SVGA_RELOC_WRITE)) !=
             (SVGA_RELOC_READ | SVGA_RELOC_WRITE))
         flags ^= (SVGA_RELOC_READ | SVGA_RELOC_WRITE);
      vmw_swc_mob_relocation(swc, mobid, NULL, (struct svga_winsys_buffer *)
                             vsurf->buf, 0, flags);
      mtx_unlock(&vsurf->mutex);
   }
}
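
/*
 * Note on the flag flip above: for an internal (readback/update) reloc,
 * a read of the surface is backed by a write into its MOB and vice
 * versa, so a read-only or write-only reloc has its READ/WRITE bits
 * swapped before being applied to the backing buffer.
 */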

static void
vmw_swc_shader_relocation(struct svga_winsys_context *swc,
                          uint32 *shid,
                          uint32 *mobid,
                          uint32 *offset,
                          struct svga_winsys_gb_shader *shader,
                          unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_winsys_screen *vws = vswc->vws;
   struct vmw_svga_winsys_shader *vshader;
   struct vmw_ctx_validate_item *ishader;

   if (!shader) {
      *shid = SVGA3D_INVALID_ID;
      return;
   }

   vshader = vmw_svga_winsys_shader(shader);

   if (!vws->base.have_vgpu10) {
      assert(vswc->shader.staged < vswc->shader.reserved);
      ishader = util_hash_table_get(vswc->hash, vshader);

      if (ishader == NULL) {
         ishader = &vswc->shader.items[vswc->shader.used + vswc->shader.staged];
         vmw_svga_winsys_shader_reference(&ishader->vshader, vshader);
         ishader->referenced = FALSE;

         _mesa_hash_table_insert(vswc->hash, vshader, ishader);
         ++vswc->shader.staged;
      }

      if (!ishader->referenced) {
         ishader->referenced = TRUE;
         p_atomic_inc(&vshader->validated);
      }
   }

   if (shid)
      *shid = vshader->shid;

   if (vshader->buf)
      vmw_swc_mob_relocation(swc, mobid, offset, vshader->buf,
                             0, SVGA_RELOC_READ);
}

static void
vmw_swc_query_relocation(struct svga_winsys_context *swc,
                         SVGAMobId *id,
                         struct svga_winsys_gb_query *query)
{
   /* Queries are backed by one big MOB */
   vmw_swc_mob_relocation(swc, id, NULL, query->buf, 0,
                          SVGA_RELOC_READ | SVGA_RELOC_WRITE);
}
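
/*
 * vmw_swc_commit - Commit the previously reserved command space and staged
 * relocations, making them part of the batch that the next flush submits.
 */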
static void
vmw_swc_commit(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

   assert(vswc->command.used + vswc->command.reserved <= vswc->command.size);
   vswc->command.used += vswc->command.reserved;
   vswc->command.reserved = 0;

   assert(vswc->surface.staged <= vswc->surface.reserved);
   assert(vswc->surface.used + vswc->surface.staged <= vswc->surface.size);
   vswc->surface.used += vswc->surface.staged;
   vswc->surface.staged = 0;
   vswc->surface.reserved = 0;

   assert(vswc->shader.staged <= vswc->shader.reserved);
   assert(vswc->shader.used + vswc->shader.staged <= vswc->shader.size);
   vswc->shader.used += vswc->shader.staged;
   vswc->shader.staged = 0;
   vswc->shader.reserved = 0;

   assert(vswc->region.staged <= vswc->region.reserved);
   assert(vswc->region.used + vswc->region.staged <= vswc->region.size);
   vswc->region.used += vswc->region.staged;
   vswc->region.staged = 0;
   vswc->region.reserved = 0;
}


static void
vmw_swc_destroy(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   unsigned i;

   for (i = 0; i < vswc->surface.used; ++i) {
      struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
      if (isurf->referenced)
         p_atomic_dec(&isurf->vsurf->validated);
      vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
   }

   for (i = 0; i < vswc->shader.used; ++i) {
      struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
      if (ishader->referenced)
         p_atomic_dec(&ishader->vshader->validated);
      vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
   }

   _mesa_hash_table_destroy(vswc->hash, NULL);
   pb_validate_destroy(vswc->validate);
   vmw_ioctl_context_destroy(vswc->vws, swc->cid);
#ifdef DEBUG
   debug_flush_ctx_destroy(vswc->fctx);
#endif
   FREE(vswc);
}

/**
 * vmw_svga_winsys_vgpu10_shader_create - The winsys shader_create callback
 *
 * @swc: The winsys context.
 * @shaderId: Previously allocated shader id.
 * @shaderType: The shader type.
 * @bytecode: The shader bytecode.
 * @bytecodeLen: The length of the bytecode.
 * @sgnInfo: The shader signature information.
 * @sgnLen: The length of the signature information.
 *
 * Creates an svga_winsys_gb_shader structure, allocates a buffer for the
 * shader code and copies the shader code into the buffer. Shader
 * resource creation is not done.
 */
static struct svga_winsys_gb_shader *
vmw_svga_winsys_vgpu10_shader_create(struct svga_winsys_context *swc,
                                     uint32 shaderId,
                                     SVGA3dShaderType shaderType,
                                     const uint32 *bytecode,
                                     uint32 bytecodeLen,
                                     const SVGA3dDXShaderSignatureHeader *sgnInfo,
                                     uint32 sgnLen)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_svga_winsys_shader *shader;
   shader = vmw_svga_shader_create(&vswc->vws->base, shaderType, bytecode,
                                   bytecodeLen, sgnInfo, sgnLen);
   if (!shader)
      return NULL;

   shader->shid = shaderId;
   return svga_winsys_shader(shader);
}

/**
 * vmw_svga_winsys_vgpu10_shader_destroy - The winsys shader_destroy callback.
 *
 * @swc: The winsys context.
 * @shader: A shader structure previously allocated by shader_create.
 *
 * Frees the shader structure and the buffer holding the shader code.
 */
static void
vmw_svga_winsys_vgpu10_shader_destroy(struct svga_winsys_context *swc,
                                      struct svga_winsys_gb_shader *shader)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

   vmw_svga_winsys_shader_destroy(&vswc->vws->base, shader);
}

/**
 * vmw_svga_winsys_resource_rebind - The winsys resource_rebind callback
 *
 * @swc: The winsys context.
 * @surface: The surface to be referenced.
 * @shader: The shader to be referenced.
 * @flags: Relocation flags.
 *
 * This callback is needed because shader backing buffers are sub-allocated, and
 * hence the kernel fencing is not sufficient. The buffers need to be put on
 * the context's validation list and fenced after command submission to avoid
 * reuse of busy shader buffers. In addition, surfaces need to be put on the
 * validation list in order for the driver to regard them as referenced
 * by the command stream.
 */
static enum pipe_error
vmw_svga_winsys_resource_rebind(struct svga_winsys_context *swc,
                                struct svga_winsys_surface *surface,
                                struct svga_winsys_gb_shader *shader,
                                unsigned flags)
{
   /**
    * Need to reserve one validation item for either the surface or
    * the shader.
    */
   if (!vmw_swc_reserve(swc, 0, 1))
      return PIPE_ERROR_OUT_OF_MEMORY;

   if (surface)
      vmw_swc_surface_relocation(swc, NULL, NULL, surface, flags);
   else if (shader)
      vmw_swc_shader_relocation(swc, NULL, NULL, NULL, shader, flags);

   vmw_swc_commit(swc);

   return PIPE_OK;
}
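
/*
 * vmw_svga_winsys_context_create - Allocate a winsys context, plug in the
 * callback table, create the kernel context (extended when VGPU10 is
 * available) and set up the validation list and relocation hash table.
 */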
struct svga_winsys_context *
vmw_svga_winsys_context_create(struct svga_winsys_screen *sws)
{
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   struct vmw_svga_winsys_context *vswc;

   vswc = CALLOC_STRUCT(vmw_svga_winsys_context);
   if (!vswc)
      return NULL;

   vswc->base.destroy = vmw_swc_destroy;
   vswc->base.reserve = vmw_swc_reserve;
   vswc->base.get_command_buffer_size = vmw_swc_get_command_buffer_size;
   vswc->base.surface_relocation = vmw_swc_surface_relocation;
   vswc->base.region_relocation = vmw_swc_region_relocation;
   vswc->base.mob_relocation = vmw_swc_mob_relocation;
   vswc->base.query_relocation = vmw_swc_query_relocation;
   vswc->base.query_bind = vmw_swc_query_bind;
   vswc->base.context_relocation = vmw_swc_context_relocation;
   vswc->base.shader_relocation = vmw_swc_shader_relocation;
   vswc->base.commit = vmw_swc_commit;
   vswc->base.flush = vmw_swc_flush;
   vswc->base.surface_map = vmw_svga_winsys_surface_map;
   vswc->base.surface_unmap = vmw_svga_winsys_surface_unmap;

   vswc->base.shader_create = vmw_svga_winsys_vgpu10_shader_create;
   vswc->base.shader_destroy = vmw_svga_winsys_vgpu10_shader_destroy;

   vswc->base.resource_rebind = vmw_svga_winsys_resource_rebind;

   if (sws->have_vgpu10)
      vswc->base.cid = vmw_ioctl_extended_context_create(vws, sws->have_vgpu10);
   else
      vswc->base.cid = vmw_ioctl_context_create(vws);

   if (vswc->base.cid == -1)
      goto out_no_context;

   vswc->base.imported_fence_fd = -1;

   vswc->base.have_gb_objects = sws->have_gb_objects;

   vswc->vws = vws;

   vswc->command.size = VMW_COMMAND_SIZE;
   vswc->surface.size = VMW_SURFACE_RELOCS;
   vswc->shader.size = VMW_SHADER_RELOCS;
   vswc->region.size = VMW_REGION_RELOCS;

   vswc->validate = pb_validate_create();
   if (!vswc->validate)
      goto out_no_validate;

   vswc->hash = util_hash_table_create_ptr_keys();
   if (!vswc->hash)
      goto out_no_hash;

#ifdef DEBUG
   vswc->fctx = debug_flush_ctx_create(TRUE, VMW_DEBUG_FLUSH_STACK);
#endif

   vswc->base.force_coherent = vws->force_coherent;
   return &vswc->base;

out_no_hash:
   pb_validate_destroy(vswc->validate);
out_no_validate:
   vmw_ioctl_context_destroy(vws, vswc->base.cid);
out_no_context:
   FREE(vswc);
   return NULL;
}