GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/winsys/virgl/drm/virgl_drm_winsys.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/stat.h>

#include "os/os_mman.h"
#include "util/os_file.h"
#include "util/os_time.h"
#include "util/u_memory.h"
#include "util/format/u_format.h"
#include "util/u_hash_table.h"
#include "util/u_inlines.h"
#include "util/u_pointer.h"
#include "frontend/drm_driver.h"
#include "virgl/virgl_screen.h"
#include "virgl/virgl_public.h"
#include "virtio-gpu/virgl_protocol.h"

#include <xf86drm.h>
#include <libsync.h>
#include "drm-uapi/virtgpu_drm.h"

#include "virgl_drm_winsys.h"
#include "virgl_drm_public.h"


#define VIRGL_DRM_VERSION(major, minor) ((major) << 16 | (minor))
#define VIRGL_DRM_VERSION_FENCE_FD VIRGL_DRM_VERSION(0, 1)

/* Gets a pointer to the virgl_hw_res containing the pointed to cache entry. */
#define cache_entry_container_res(ptr) \
   (struct virgl_hw_res*)((char*)ptr - offsetof(struct virgl_hw_res, cache_entry))

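/* Only buffer-like bind types are recycled through the winsys cache;
 * resources with any other bind flags (render targets, sampler views,
 * scanout, ...) are destroyed as soon as their last reference goes away.
 */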
static inline boolean can_cache_resource(uint32_t bind)
{
   return bind == VIRGL_BIND_CONSTANT_BUFFER ||
          bind == VIRGL_BIND_INDEX_BUFFER ||
          bind == VIRGL_BIND_VERTEX_BUFFER ||
          bind == VIRGL_BIND_CUSTOM ||
          bind == VIRGL_BIND_STAGING ||
          bind == 0;
}

static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
                                 struct virgl_hw_res *res)
{
   struct drm_gem_close args;

   mtx_lock(&qdws->bo_handles_mutex);

   /* We intentionally avoid taking the lock in
    * virgl_drm_resource_reference. Now that the
    * lock is taken, we need to check the refcount
    * again. */
   if (pipe_is_referenced(&res->reference)) {
      mtx_unlock(&qdws->bo_handles_mutex);
      return;
   }

   _mesa_hash_table_remove_key(qdws->bo_handles,
                               (void *)(uintptr_t)res->bo_handle);
   if (res->flink_name)
      _mesa_hash_table_remove_key(qdws->bo_names,
                                  (void *)(uintptr_t)res->flink_name);
   mtx_unlock(&qdws->bo_handles_mutex);
   if (res->ptr)
      os_munmap(res->ptr, res->size);

   memset(&args, 0, sizeof(args));
   args.handle = res->bo_handle;
   drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
   FREE(res);
}

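/* A non-blocking busy check: DRM_IOCTL_VIRTGPU_WAIT with VIRTGPU_WAIT_NOWAIT
 * fails with EBUSY while the host still owns the resource. Resources that
 * were never submitted and never exported skip the ioctl entirely via the
 * maybe_busy/external flags.
 */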
static boolean virgl_drm_resource_is_busy(struct virgl_winsys *vws,
                                          struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return false;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
   waitcmd.flags = VIRTGPU_WAIT_NOWAIT;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret && errno == EBUSY)
      return TRUE;

   p_atomic_set(&res->maybe_busy, false);

   return FALSE;
}

static void
virgl_drm_winsys_destroy(struct virgl_winsys *qws)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   virgl_resource_cache_flush(&qdws->cache);

   _mesa_hash_table_destroy(qdws->bo_handles, NULL);
   _mesa_hash_table_destroy(qdws->bo_names, NULL);
   mtx_destroy(&qdws->bo_handles_mutex);
   mtx_destroy(&qdws->mutex);

   FREE(qdws);
}

static void virgl_drm_resource_reference(struct virgl_winsys *qws,
                                         struct virgl_hw_res **dres,
                                         struct virgl_hw_res *sres)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *old = *dres;

   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource(old->bind) ||
          p_atomic_read(&old->external)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         mtx_lock(&qdws->mutex);
         virgl_resource_cache_add(&qdws->cache, &old->cache_entry);
         mtx_unlock(&qdws->mutex);
      }
   }
   *dres = sres;
}

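/* Blob creation is two steps carried by one ioctl: the
 * VIRGL_CCMD_PIPE_RESOURCE_CREATE command in cmd[] tells virglrenderer how
 * to back the given blob_id, and DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB then
 * instantiates a mappable guest object for it.
 */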
static struct virgl_hw_res *
virgl_drm_winsys_resource_create_blob(struct virgl_winsys *qws,
                                      enum pipe_texture_target target,
                                      uint32_t format,
                                      uint32_t bind,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t depth,
                                      uint32_t array_size,
                                      uint32_t last_level,
                                      uint32_t nr_samples,
                                      uint32_t flags,
                                      uint32_t size)
{
   int ret;
   int32_t blob_id;
   uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
   struct virgl_hw_res *res;

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   /* Make sure blob is page aligned. */
   if (flags & (VIRGL_RESOURCE_FLAG_MAP_PERSISTENT |
                VIRGL_RESOURCE_FLAG_MAP_COHERENT)) {
      width = ALIGN(width, getpagesize());
      size = ALIGN(size, getpagesize());
   }

   blob_id = p_atomic_inc_return(&qdws->blob_id);
   cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_CREATE, 0, VIRGL_PIPE_RES_CREATE_SIZE);
   cmd[VIRGL_PIPE_RES_CREATE_FORMAT] = format;
   cmd[VIRGL_PIPE_RES_CREATE_BIND] = bind;
   cmd[VIRGL_PIPE_RES_CREATE_TARGET] = target;
   cmd[VIRGL_PIPE_RES_CREATE_WIDTH] = width;
   cmd[VIRGL_PIPE_RES_CREATE_HEIGHT] = height;
   cmd[VIRGL_PIPE_RES_CREATE_DEPTH] = depth;
   cmd[VIRGL_PIPE_RES_CREATE_ARRAY_SIZE] = array_size;
   cmd[VIRGL_PIPE_RES_CREATE_LAST_LEVEL] = last_level;
   cmd[VIRGL_PIPE_RES_CREATE_NR_SAMPLES] = nr_samples;
   cmd[VIRGL_PIPE_RES_CREATE_FLAGS] = flags;
   cmd[VIRGL_PIPE_RES_CREATE_BLOB_ID] = blob_id;

   drm_rc_blob.cmd = (unsigned long)(void *)&cmd;
   drm_rc_blob.cmd_size = 4 * (VIRGL_PIPE_RES_CREATE_SIZE + 1);
   drm_rc_blob.size = size;
   drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
   drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
   drm_rc_blob.blob_id = (uint64_t) blob_id;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;
   res->res_handle = drm_rc_blob.res_handle;
   res->bo_handle = drm_rc_blob.bo_handle;
   res->size = size;
   res->flags = flags;
   res->maybe_untyped = false;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, false);
   p_atomic_set(&res->num_cs_references, 0);
   virgl_resource_cache_entry_init(&res->cache_entry, size, bind, format,
                                   flags);
   return res;
}

static struct virgl_hw_res *
virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
                                 enum pipe_texture_target target,
                                 uint32_t format,
                                 uint32_t bind,
                                 uint32_t width,
                                 uint32_t height,
                                 uint32_t depth,
                                 uint32_t array_size,
                                 uint32_t last_level,
                                 uint32_t nr_samples,
                                 uint32_t size,
                                 bool for_fencing)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create createcmd;
   int ret;
   struct virgl_hw_res *res;
   uint32_t stride = width * util_format_get_blocksize(format);

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   memset(&createcmd, 0, sizeof(createcmd));
   createcmd.target = target;
   createcmd.format = pipe_to_virgl_format(format);
   createcmd.bind = bind;
   createcmd.width = width;
   createcmd.height = height;
   createcmd.depth = depth;
   createcmd.array_size = array_size;
   createcmd.last_level = last_level;
   createcmd.nr_samples = nr_samples;
   createcmd.stride = stride;
   createcmd.size = size;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;

   res->res_handle = createcmd.res_handle;
   res->bo_handle = createcmd.bo_handle;
   res->size = size;
   res->target = target;
   res->maybe_untyped = false;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, false);
   p_atomic_set(&res->num_cs_references, 0);

   /* A newly created resource is considered busy by the kernel until the
    * command is retired. But for our purposes, we can consider it idle
    * unless it is used for fencing.
    */
   p_atomic_set(&res->maybe_busy, for_fencing);

   virgl_resource_cache_entry_init(&res->cache_entry, size, bind, format, 0);

   return res;
}

/*
 * Previously, with DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, all host resources had
 * a guest memory shadow resource with size = stride * bpp. Virglrenderer
 * would guess the stride implicitly when performing transfer operations, if
 * the stride wasn't specified. Interestingly, vtest would specify the stride.
 *
 * Guessing the stride breaks down with YUV images, which may be imported into
 * Mesa as 3R8 images. It also doesn't work if an external allocator
 * (e.g., minigbm) decides to use a stride not equal to width * bpp. With blob
 * resources, the size = stride * bpp restriction no longer holds, so use
 * explicit strides passed into Mesa.
 */
static inline bool use_explicit_stride(struct virgl_hw_res *res, uint32_t level,
                                       uint32_t depth)
{
   return (params[param_resource_blob].value &&
           res->blob_mem == VIRTGPU_BLOB_MEM_HOST3D_GUEST &&
           res->target == PIPE_TEXTURE_2D &&
           level == 0 && depth == 1);
}

static int
virgl_bo_transfer_put(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_to_host tohostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&tohostcmd, 0, sizeof(tohostcmd));
   tohostcmd.bo_handle = res->bo_handle;
   tohostcmd.box.x = box->x;
   tohostcmd.box.y = box->y;
   tohostcmd.box.z = box->z;
   tohostcmd.box.w = box->width;
   tohostcmd.box.h = box->height;
   tohostcmd.box.d = box->depth;
   tohostcmd.offset = buf_offset;
   tohostcmd.level = level;

   if (use_explicit_stride(res, level, box->depth))
      tohostcmd.stride = stride;

   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
}

static int
virgl_bo_transfer_get(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_from_host fromhostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&fromhostcmd, 0, sizeof(fromhostcmd));
   fromhostcmd.bo_handle = res->bo_handle;
   fromhostcmd.level = level;
   fromhostcmd.offset = buf_offset;
   fromhostcmd.box.x = box->x;
   fromhostcmd.box.y = box->y;
   fromhostcmd.box.z = box->z;
   fromhostcmd.box.w = box->width;
   fromhostcmd.box.h = box->height;
   fromhostcmd.box.d = box->depth;

   if (use_explicit_stride(res, level, box->depth))
      fromhostcmd.stride = stride;

   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
}

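/* Tries to satisfy an allocation from the cache of idle, compatible
 * resources before asking the kernel for a new one. Persistent/coherent
 * mappings require blob resources; everything else takes the classic
 * RESOURCE_CREATE path.
 */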
static struct virgl_hw_res *
virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
                                       enum pipe_texture_target target,
                                       uint32_t format,
                                       uint32_t bind,
                                       uint32_t width,
                                       uint32_t height,
                                       uint32_t depth,
                                       uint32_t array_size,
                                       uint32_t last_level,
                                       uint32_t nr_samples,
                                       uint32_t flags,
                                       uint32_t size)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *res;
   struct virgl_resource_cache_entry *entry;

   if (!can_cache_resource(bind))
      goto alloc;

   mtx_lock(&qdws->mutex);

   entry = virgl_resource_cache_remove_compatible(&qdws->cache, size,
                                                  bind, format, flags);
   if (entry) {
      res = cache_entry_container_res(entry);
      mtx_unlock(&qdws->mutex);
      pipe_reference_init(&res->reference, 1);
      return res;
   }

   mtx_unlock(&qdws->mutex);

alloc:
   if (flags & (VIRGL_RESOURCE_FLAG_MAP_PERSISTENT |
                VIRGL_RESOURCE_FLAG_MAP_COHERENT))
      res = virgl_drm_winsys_resource_create_blob(qws, target, format, bind,
                                                  width, height, depth,
                                                  array_size, last_level,
                                                  nr_samples, flags, size);
   else
      res = virgl_drm_winsys_resource_create(qws, target, format, bind, width,
                                             height, depth, array_size,
                                             last_level, nr_samples, size,
                                             false);
   return res;
}

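/* Imports an externally created buffer, either by GEM flink name
 * (WINSYS_HANDLE_TYPE_SHARED) or by dma-buf fd (WINSYS_HANDLE_TYPE_FD).
 * The bo_names/bo_handles tables guarantee a single virgl_hw_res per
 * underlying handle.
 */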
static struct virgl_hw_res *
virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                        struct winsys_handle *whandle,
                                        uint32_t *plane,
                                        uint32_t *stride,
                                        uint32_t *plane_offset,
                                        uint64_t *modifier,
                                        uint32_t *blob_mem)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res = NULL;
   uint32_t handle = whandle->handle;

   if (whandle->plane >= VIRGL_MAX_PLANE_COUNT) {
      return NULL;
   }

   if (whandle->offset != 0 && whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      _debug_printf("attempt to import unsupported winsys offset %u\n",
                    whandle->offset);
      return NULL;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      *plane = whandle->plane;
      *stride = whandle->stride;
      *plane_offset = whandle->offset;
      *modifier = whandle->modifier;
   }

   mtx_lock(&qdws->bo_handles_mutex);

   /* We must maintain a list of pairs <handle, bo>, so that we always return
    * the same BO for one particular handle. If we didn't do that and created
    * more than one BO for the same handle and then relocated them in a CS,
    * we would hit a deadlock in the kernel.
    *
    * The list of pairs is guarded by a mutex, of course. */
   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      int r;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r)
         goto done;
      res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
   } else {
      /* Unknown handle type */
      goto done;
   }

   if (res) {
      /* qdws->bo_{names,handles} hold weak pointers to virgl_hw_res. Because
       * virgl_drm_resource_reference does not take qdws->bo_handles_mutex
       * until it enters virgl_hw_res_destroy, there is a small window that
       * the refcount can drop to zero. Call p_atomic_inc directly instead of
       * virgl_drm_resource_reference to avoid hitting assert failures.
       */
      p_atomic_inc(&res->reference.count);
      goto done;
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      res->bo_handle = handle;
   } else {
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
      res->flink_name = whandle->handle;
   }

   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      /* close */
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;
   res->blob_mem = info_arg.blob_mem;
   *blob_mem = info_arg.blob_mem;

   res->size = info_arg.size;
   res->maybe_untyped = info_arg.blob_mem ? true : false;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, true);
   res->num_cs_references = 0;

   if (res->flink_name)
      _mesa_hash_table_insert(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
   _mesa_hash_table_insert(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);

done:
   mtx_unlock(&qdws->bo_handles_mutex);
   return res;
}

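/* Imported blob resources start out untyped (maybe_untyped). The first
 * caller that knows the layout assigns it exactly once, by encoding a
 * PIPE_RESOURCE_SET_TYPE command and submitting it through the execbuffer
 * ioctl with the BO attached.
 */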
static void
virgl_drm_winsys_resource_set_type(struct virgl_winsys *qws,
                                   struct virgl_hw_res *res,
                                   uint32_t format, uint32_t bind,
                                   uint32_t width, uint32_t height,
                                   uint32_t usage, uint64_t modifier,
                                   uint32_t plane_count,
                                   const uint32_t *plane_strides,
                                   const uint32_t *plane_offsets)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   uint32_t cmd[VIRGL_PIPE_RES_SET_TYPE_SIZE(VIRGL_MAX_PLANE_COUNT)];
   struct drm_virtgpu_execbuffer eb;
   int ret;

   mtx_lock(&qdws->bo_handles_mutex);

   if (!res->maybe_untyped) {
      mtx_unlock(&qdws->bo_handles_mutex);
      return;
   }
   res->maybe_untyped = false;

   assert(plane_count && plane_count <= VIRGL_MAX_PLANE_COUNT);

   cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_SET_TYPE, 0, VIRGL_PIPE_RES_SET_TYPE_SIZE(plane_count));
   cmd[VIRGL_PIPE_RES_SET_TYPE_RES_HANDLE] = res->res_handle;
   cmd[VIRGL_PIPE_RES_SET_TYPE_FORMAT] = format;
   cmd[VIRGL_PIPE_RES_SET_TYPE_BIND] = bind;
   cmd[VIRGL_PIPE_RES_SET_TYPE_WIDTH] = width;
   cmd[VIRGL_PIPE_RES_SET_TYPE_HEIGHT] = height;
   cmd[VIRGL_PIPE_RES_SET_TYPE_USAGE] = usage;
   cmd[VIRGL_PIPE_RES_SET_TYPE_MODIFIER_LO] = (uint32_t)modifier;
   cmd[VIRGL_PIPE_RES_SET_TYPE_MODIFIER_HI] = (uint32_t)(modifier >> 32);
   for (uint32_t i = 0; i < plane_count; i++) {
      cmd[VIRGL_PIPE_RES_SET_TYPE_PLANE_STRIDE(i)] = plane_strides[i];
      cmd[VIRGL_PIPE_RES_SET_TYPE_PLANE_OFFSET(i)] = plane_offsets[i];
   }

   memset(&eb, 0, sizeof(eb));
   eb.command = (uintptr_t)cmd;
   eb.size = (1 + VIRGL_PIPE_RES_SET_TYPE_SIZE(plane_count)) * 4;
   eb.num_bo_handles = 1;
   eb.bo_handles = (uintptr_t)&res->bo_handle;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      _debug_printf("failed to set resource type: %s", strerror(errno));

   mtx_unlock(&qdws->bo_handles_mutex);
}

static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                    struct virgl_hw_res *res,
                                                    uint32_t stride,
                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
      return FALSE;

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      if (!res->flink_name) {
         memset(&flink, 0, sizeof(flink));
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return FALSE;
         }
         res->flink_name = flink.name;

         mtx_lock(&qdws->bo_handles_mutex);
         _mesa_hash_table_insert(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
         mtx_unlock(&qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink_name;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
         return FALSE;
      mtx_lock(&qdws->bo_handles_mutex);
      _mesa_hash_table_insert(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
      mtx_unlock(&qdws->bo_handles_mutex);
   }

   p_atomic_set(&res->external, true);

   whandle->stride = stride;
   return TRUE;
}

static void *virgl_drm_resource_map(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_map mmap_arg;
   void *ptr;

   if (res->ptr)
      return res->ptr;

   memset(&mmap_arg, 0, sizeof(mmap_arg));
   mmap_arg.handle = res->bo_handle;
   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
      return NULL;

   ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
                 qdws->fd, mmap_arg.offset);
   if (ptr == MAP_FAILED)
      return NULL;

   res->ptr = ptr;
   return ptr;
}

static void virgl_drm_resource_wait(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret)
      _debug_printf("waiting got error - %d, slow gpu or hang?\n", errno);

   p_atomic_set(&res->maybe_busy, false);
}

static bool virgl_drm_alloc_res_list(struct virgl_drm_cmd_buf *cbuf,
                                     int initial_size)
{
   cbuf->nres = initial_size;
   cbuf->cres = 0;

   cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
   if (!cbuf->res_bo)
      return false;

   cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
   if (!cbuf->res_hlist) {
      FREE(cbuf->res_bo);
      return false;
   }

   return true;
}

static void virgl_drm_free_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }
   FREE(cbuf->res_hlist);
   FREE(cbuf->res_bo);
}

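/* reloc_indices_hashlist[] is a one-slot-per-bucket hint into res_bo[]: a
 * hit answers the lookup in O(1), a miss falls back to a linear scan and
 * refreshes the hint. Bucket collisions are harmless; they only cost the
 * scan.
 */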
static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
                                    struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
   int i;

   if (cbuf->is_handle_added[hash]) {
      i = cbuf->reloc_indices_hashlist[hash];
      if (cbuf->res_bo[i] == res)
         return true;

      for (i = 0; i < cbuf->cres; i++) {
         if (cbuf->res_bo[i] == res) {
            cbuf->reloc_indices_hashlist[hash] = i;
            return true;
         }
      }
   }
   return false;
}

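/* Appends a resource to the cbuf's relocation list, growing the BO and
 * handle arrays in 256-entry steps when full, and pins it with a reference
 * plus a num_cs_references count until the cbuf is submitted or destroyed.
 */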
static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
                              struct virgl_drm_cmd_buf *cbuf,
                              struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);

   if (cbuf->cres >= cbuf->nres) {
      unsigned new_nres = cbuf->nres + 256;
      void *new_ptr = REALLOC(cbuf->res_bo,
                              cbuf->nres * sizeof(struct virgl_hw_buf*),
                              new_nres * sizeof(struct virgl_hw_buf*));
      if (!new_ptr) {
         _debug_printf("failure to add relocation %d, %d\n", cbuf->cres, new_nres);
         return;
      }
      cbuf->res_bo = new_ptr;

      new_ptr = REALLOC(cbuf->res_hlist,
                        cbuf->nres * sizeof(uint32_t),
                        new_nres * sizeof(uint32_t));
      if (!new_ptr) {
         _debug_printf("failure to add hlist relocation %d, %d\n", cbuf->cres, cbuf->nres);
         return;
      }
      cbuf->res_hlist = new_ptr;
      cbuf->nres = new_nres;
   }

   cbuf->res_bo[cbuf->cres] = NULL;
   virgl_drm_resource_reference(&qdws->base, &cbuf->res_bo[cbuf->cres], res);
   cbuf->res_hlist[cbuf->cres] = res->bo_handle;
   cbuf->is_handle_added[hash] = TRUE;

   cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
   p_atomic_inc(&res->num_cs_references);
   cbuf->cres++;
}

/* This is called after the cbuf is submitted. */
static void virgl_drm_clear_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      /* mark all BOs busy after submission */
      p_atomic_set(&cbuf->res_bo[i]->maybe_busy, true);

      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }

   cbuf->cres = 0;

   memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
}

static void virgl_drm_emit_res(struct virgl_winsys *qws,
                               struct virgl_cmd_buf *_cbuf,
                               struct virgl_hw_res *res, boolean write_buf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   boolean already_in_list = virgl_drm_lookup_res(cbuf, res);

   if (write_buf)
      cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;

   if (!already_in_list)
      virgl_drm_add_res(qdws, cbuf, res);
}

static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct virgl_hw_res *res)
{
   if (!p_atomic_read(&res->num_cs_references))
      return FALSE;

   return TRUE;
}

static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws,
                                                      uint32_t size)
{
   struct virgl_drm_cmd_buf *cbuf;

   cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
   if (!cbuf)
      return NULL;

   cbuf->ws = qws;

   if (!virgl_drm_alloc_res_list(cbuf, 512)) {
      FREE(cbuf);
      return NULL;
   }

   cbuf->buf = CALLOC(size, sizeof(uint32_t));
   if (!cbuf->buf) {
      FREE(cbuf->res_hlist);
      FREE(cbuf->res_bo);
      FREE(cbuf);
      return NULL;
   }

   cbuf->in_fence_fd = -1;
   cbuf->base.buf = cbuf->buf;
   return &cbuf->base;
}

static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);

   virgl_drm_free_res_list(cbuf);

   FREE(cbuf->buf);
   FREE(cbuf);
}

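/* Wraps a sync-file fd in a pipe_fence_handle. External fds are duplicated
 * so the fence owns its copy; either way the fd is closed when the last
 * fence reference goes away (see virgl_fence_reference).
 */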
static struct pipe_fence_handle *
virgl_drm_fence_create(struct virgl_winsys *vws, int fd, bool external)
{
   struct virgl_drm_fence *fence;

   assert(vws->supports_fences);

   if (external) {
      fd = os_dupfd_cloexec(fd);
      if (fd < 0)
         return NULL;
   }

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence) {
      close(fd);
      return NULL;
   }

   fence->fd = fd;
   fence->external = external;

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}

static struct pipe_fence_handle *
virgl_drm_fence_create_legacy(struct virgl_winsys *vws)
{
   struct virgl_drm_fence *fence;

   assert(!vws->supports_fences);

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence)
      return NULL;
   fence->fd = -1;

   /* Resources for fences should not be from the cache, since we are basing
    * the fence status on the resource creation busy status.
    */
   fence->hw_res = virgl_drm_winsys_resource_create(vws, PIPE_BUFFER,
         PIPE_FORMAT_R8_UNORM, VIRGL_BIND_CUSTOM, 8, 1, 1, 0, 0, 0, 8, true);
   if (!fence->hw_res) {
      FREE(fence);
      return NULL;
   }

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}

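/* Flushes the accumulated command stream plus its relocation list to the
 * kernel. With fence-fd support, an incoming sync-file can gate execution
 * and an outgoing one is handed back to the caller; otherwise a freshly
 * created, busy-marked resource stands in for the fence (see
 * virgl_drm_fence_create_legacy).
 */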
static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
                                       struct virgl_cmd_buf *_cbuf,
                                       struct pipe_fence_handle **fence)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct drm_virtgpu_execbuffer eb;
   int ret;

   if (cbuf->base.cdw == 0)
      return 0;

   memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
   eb.command = (unsigned long)(void*)cbuf->buf;
   eb.size = cbuf->base.cdw * 4;
   eb.num_bo_handles = cbuf->cres;
   eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;

   eb.fence_fd = -1;
   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_IN;
         eb.fence_fd = cbuf->in_fence_fd;
      }

      if (fence != NULL)
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_OUT;
   } else {
      assert(cbuf->in_fence_fd < 0);
   }

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      _debug_printf("got error from kernel - expect bad rendering %d\n", errno);
   cbuf->base.cdw = 0;

   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         close(cbuf->in_fence_fd);
         cbuf->in_fence_fd = -1;
      }

      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create(qws, eb.fence_fd, false);
   } else {
      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create_legacy(qws);
   }

   virgl_drm_clear_res_list(cbuf);

   return ret;
}

static int virgl_drm_get_caps(struct virgl_winsys *vws,
                              struct virgl_drm_caps *caps)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_get_caps args;
   int ret;

   virgl_ws_fill_new_caps_defaults(caps);

   memset(&args, 0, sizeof(args));
   if (params[param_capset_fix].value) {
      /* if we have the query fix - try and get cap set id 2 first */
      args.cap_set_id = 2;
      args.size = sizeof(union virgl_caps);
   } else {
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
   }
   args.addr = (unsigned long)&caps->caps;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
   if (ret == -1 && errno == EINVAL) {
      /* Fallback to v1 */
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
      ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
      if (ret == -1)
         return ret;
   }
   return ret;
}

static struct pipe_fence_handle *
virgl_cs_create_fence(struct virgl_winsys *vws, int fd)
{
   if (!vws->supports_fences)
      return NULL;

   return virgl_drm_fence_create(vws, fd, true);
}

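/* Two waiting strategies: with sync-file fences the wait is delegated to
 * sync_wait() (timeout converted from nanoseconds to milliseconds, rounded
 * up); legacy fences are polled through the busy status of their dummy
 * resource.
 */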
static bool virgl_fence_wait(struct virgl_winsys *vws,
                             struct pipe_fence_handle *_fence,
                             uint64_t timeout)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (vws->supports_fences) {
      uint64_t timeout_ms;
      int timeout_poll;

      if (timeout == 0)
         return sync_wait(fence->fd, 0) == 0;

      timeout_ms = timeout / 1000000;
      /* round up */
      if (timeout_ms * 1000000 < timeout)
         timeout_ms++;

      timeout_poll = timeout_ms <= INT_MAX ? (int) timeout_ms : -1;

      return sync_wait(fence->fd, timeout_poll) == 0;
   }

   if (timeout == 0)
      return !virgl_drm_resource_is_busy(vws, fence->hw_res);

   if (timeout != PIPE_TIMEOUT_INFINITE) {
      int64_t start_time = os_time_get();
      timeout /= 1000;
      while (virgl_drm_resource_is_busy(vws, fence->hw_res)) {
         if (os_time_get() - start_time >= timeout)
            return FALSE;
         os_time_sleep(10);
      }
      return TRUE;
   }
   virgl_drm_resource_wait(vws, fence->hw_res);

   return TRUE;
}

static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   struct virgl_drm_fence *dfence = virgl_drm_fence(*dst);
   struct virgl_drm_fence *sfence = virgl_drm_fence(src);

   if (pipe_reference(&dfence->reference, &sfence->reference)) {
      if (vws->supports_fences) {
         close(dfence->fd);
      } else {
         virgl_drm_resource_reference(vws, &dfence->hw_res, NULL);
      }
      FREE(dfence);
   }

   *dst = src;
}

static void virgl_fence_server_sync(struct virgl_winsys *vws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct pipe_fence_handle *_fence)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return;

   /* if not an external fence, then nothing more to do without preemption: */
   if (!fence->external)
      return;

   sync_accumulate("virgl", &cbuf->in_fence_fd, fence->fd);
}

static int virgl_fence_get_fd(struct virgl_winsys *vws,
                              struct pipe_fence_handle *_fence)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return -1;

   return os_dupfd_cloexec(fence->fd);
}

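/* The virtio-gpu DRM driver keeps its major version at 0; feature support
 * such as fence fds is keyed off the minor version (see
 * VIRGL_DRM_VERSION_FENCE_FD above), so any other major is rejected.
 */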
static int virgl_drm_get_version(int fd)
{
   int ret;
   drmVersionPtr version;

   version = drmGetVersion(fd);

   if (!version)
      ret = -EFAULT;
   else if (version->version_major != 0)
      ret = -EINVAL;
   else
      ret = VIRGL_DRM_VERSION(0, version->version_minor);

   drmFreeVersion(version);

   return ret;
}

static bool
virgl_drm_resource_cache_entry_is_busy(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   return virgl_drm_resource_is_busy(&qdws->base, res);
}

static void
virgl_drm_resource_cache_entry_release(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   virgl_hw_res_destroy(qdws, res);
}

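/* Probes every virtgpu parameter the winsys cares about (3D support, blob
 * resources, host-visible memory, the capset fix, ...) up front, then wires
 * up the winsys vtable. Without 3D features there is nothing for virgl to
 * drive, so creation bails early.
 */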
static struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
   static const unsigned CACHE_TIMEOUT_USEC = 1000000;
   struct virgl_drm_winsys *qdws;
   int drm_version;
   int ret;

   for (uint32_t i = 0; i < ARRAY_SIZE(params); i++) {
      struct drm_virtgpu_getparam getparam = { 0 };
      uint64_t value = 0;
      getparam.param = params[i].param;
      getparam.value = (uint64_t)(uintptr_t)&value;
      ret = drmIoctl(drmFD, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
      params[i].value = (ret == 0) ? value : 0;
   }

   if (!params[param_3d_features].value)
      return NULL;

   drm_version = virgl_drm_get_version(drmFD);
   if (drm_version < 0)
      return NULL;

   qdws = CALLOC_STRUCT(virgl_drm_winsys);
   if (!qdws)
      return NULL;

   qdws->fd = drmFD;
   virgl_resource_cache_init(&qdws->cache, CACHE_TIMEOUT_USEC,
                             virgl_drm_resource_cache_entry_is_busy,
                             virgl_drm_resource_cache_entry_release,
                             qdws);
   (void) mtx_init(&qdws->mutex, mtx_plain);
   (void) mtx_init(&qdws->bo_handles_mutex, mtx_plain);
   p_atomic_set(&qdws->blob_id, 0);

   qdws->bo_handles = util_hash_table_create_ptr_keys();
   qdws->bo_names = util_hash_table_create_ptr_keys();
   qdws->base.destroy = virgl_drm_winsys_destroy;

   qdws->base.transfer_put = virgl_bo_transfer_put;
   qdws->base.transfer_get = virgl_bo_transfer_get;
   qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
   qdws->base.resource_reference = virgl_drm_resource_reference;
   qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
   qdws->base.resource_set_type = virgl_drm_winsys_resource_set_type;
   qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
   qdws->base.resource_map = virgl_drm_resource_map;
   qdws->base.resource_wait = virgl_drm_resource_wait;
   qdws->base.resource_is_busy = virgl_drm_resource_is_busy;
   qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
   qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
   qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
   qdws->base.emit_res = virgl_drm_emit_res;
   qdws->base.res_is_referenced = virgl_drm_res_is_ref;

   qdws->base.cs_create_fence = virgl_cs_create_fence;
   qdws->base.fence_wait = virgl_fence_wait;
   qdws->base.fence_reference = virgl_fence_reference;
   qdws->base.fence_server_sync = virgl_fence_server_sync;
   qdws->base.fence_get_fd = virgl_fence_get_fd;
   qdws->base.get_caps = virgl_drm_get_caps;
   qdws->base.supports_fences = drm_version >= VIRGL_DRM_VERSION_FENCE_FD;
   qdws->base.supports_encoded_transfers = 1;

   qdws->base.supports_coherent = params[param_resource_blob].value &&
                                  params[param_host_visible].value;
   return &qdws->base;
}

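/* Screens are shared per device: fd_tab maps a device fd to its existing
 * pipe_screen (with a refcount), so repeated screen creation on the same
 * device returns one screen. virgl_screen_mutex guards the table.
 */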
static struct hash_table *fd_tab = NULL;
static mtx_t virgl_screen_mutex = _MTX_INITIALIZER_NP;

static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
   struct virgl_screen *screen = virgl_screen(pscreen);
   boolean destroy;

   mtx_lock(&virgl_screen_mutex);
   destroy = --screen->refcnt == 0;
   if (destroy) {
      int fd = virgl_drm_winsys(screen->vws)->fd;
      _mesa_hash_table_remove_key(fd_tab, intptr_to_pointer(fd));
      close(fd);
   }
   mtx_unlock(&virgl_screen_mutex);

   if (destroy) {
      pscreen->destroy = screen->winsys_priv;
      pscreen->destroy(pscreen);
   }
}

struct pipe_screen *
virgl_drm_screen_create(int fd, const struct pipe_screen_config *config)
{
   struct pipe_screen *pscreen = NULL;

   mtx_lock(&virgl_screen_mutex);
   if (!fd_tab) {
      fd_tab = util_hash_table_create_fd_keys();
      if (!fd_tab)
         goto unlock;
   }

   pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
   if (pscreen) {
      virgl_screen(pscreen)->refcnt++;
   } else {
      struct virgl_winsys *vws;
      int dup_fd = os_dupfd_cloexec(fd);

      vws = virgl_drm_winsys_create(dup_fd);
      if (!vws) {
         close(dup_fd);
         goto unlock;
      }

      pscreen = virgl_create_screen(vws, config);
      if (pscreen) {
         _mesa_hash_table_insert(fd_tab, intptr_to_pointer(dup_fd), pscreen);

         /* Bit of a hack, to avoid circular linkage dependency,
          * ie. pipe driver having to call in to winsys, we
          * override the pipe drivers screen->destroy():
          */
         virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
         pscreen->destroy = virgl_drm_screen_destroy;
      }
   }

unlock:
   mtx_unlock(&virgl_screen_mutex);
   return pscreen;
}