GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/virtio/vulkan/vn_renderer.h
/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 */

#ifndef VN_RENDERER_H
#define VN_RENDERER_H

#include "vn_common.h"

struct vn_renderer_shmem {
   atomic_int refcount;
   uint32_t res_id;
   size_t mmap_size; /* for internal use only (i.e., munmap) */
   void *mmap_ptr;
};

struct vn_renderer_bo {
   atomic_int refcount;
   uint32_t res_id;
   /* for internal use only */
   size_t mmap_size;
   void *mmap_ptr;
};

/*
 * A sync consists of a uint64_t counter. The counter can be updated by CPU
 * or by GPU. It can also be waited on by CPU or by GPU until it reaches
 * certain values.
 *
 * This models after timeline VkSemaphore rather than timeline drm_syncobj.
 * The main difference is that drm_syncobj can have unsignaled value 0.
 */
struct vn_renderer_sync {
   uint32_t sync_id;
};

struct vn_renderer_info {
   struct {
      uint16_t vendor_id;
      uint16_t device_id;

      bool has_bus_info;
      uint16_t domain;
      uint8_t bus;
      uint8_t device;
      uint8_t function;
   } pci;

   bool has_dma_buf_import;
   bool has_cache_management;
   bool has_external_sync;
   bool has_implicit_fencing;

   uint32_t max_sync_queue_count;

   /* hw capset */
   uint32_t wire_format_version;
   uint32_t vk_xml_version;
   uint32_t vk_ext_command_serialization_spec_version;
   uint32_t vk_mesa_venus_protocol_spec_version;
};

struct vn_renderer_submit_batch {
   const void *cs_data;
   size_t cs_size;

   /*
    * Submit cs to the virtual sync queue identified by sync_queue_index. The
    * virtual queue is assumed to be associated with the physical VkQueue
    * identified by vk_queue_id. After the execution completes on the
    * VkQueue, the virtual sync queue is signaled.
    *
    * sync_queue_index must be less than max_sync_queue_count.
    *
    * vk_queue_id specifies the object id of a VkQueue.
    *
    * When sync_queue_cpu is true, it specifies the special CPU sync queue,
    * and sync_queue_index/vk_queue_id are ignored. TODO revisit this later
    */
   uint32_t sync_queue_index;
   bool sync_queue_cpu;
   vn_object_id vk_queue_id;

   /* syncs to update when the virtual sync queue is signaled */
   struct vn_renderer_sync *const *syncs;
   /* TODO allow NULL when syncs are all binary? */
   const uint64_t *sync_values;
   uint32_t sync_count;
};

struct vn_renderer_submit {
   /* BOs to pin and to fence implicitly
    *
    * TODO track all bos and automatically pin them. We don't do it yet
    * because each vn_command_buffer owns a bo. We can probably make do by
    * returning the bos to a bo cache and exclude bo cache from pinning.
    */
   struct vn_renderer_bo *const *bos;
   uint32_t bo_count;

   const struct vn_renderer_submit_batch *batches;
   uint32_t batch_count;
};

struct vn_renderer_wait {
   bool wait_any;
   uint64_t timeout;

   struct vn_renderer_sync *const *syncs;
   /* TODO allow NULL when syncs are all binary? */
   const uint64_t *sync_values;
   uint32_t sync_count;
};

struct vn_renderer_ops {
   void (*destroy)(struct vn_renderer *renderer,
                   const VkAllocationCallbacks *alloc);

   void (*get_info)(struct vn_renderer *renderer,
                    struct vn_renderer_info *info);

   VkResult (*submit)(struct vn_renderer *renderer,
                      const struct vn_renderer_submit *submit);

   /*
    * On success, returns VK_SUCCESS or VK_TIMEOUT. On failure, returns
    * VK_ERROR_DEVICE_LOST or out of device/host memory.
    */
   VkResult (*wait)(struct vn_renderer *renderer,
                    const struct vn_renderer_wait *wait);
};

struct vn_renderer_shmem_ops {
   struct vn_renderer_shmem *(*create)(struct vn_renderer *renderer,
                                       size_t size);
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_shmem *shmem);
};

struct vn_renderer_bo_ops {
   VkResult (*create_from_device_memory)(
      struct vn_renderer *renderer,
      VkDeviceSize size,
      vn_object_id mem_id,
      VkMemoryPropertyFlags flags,
      VkExternalMemoryHandleTypeFlags external_handles,
      struct vn_renderer_bo **out_bo);

   VkResult (*create_from_dma_buf)(struct vn_renderer *renderer,
                                   VkDeviceSize size,
                                   int fd,
                                   VkMemoryPropertyFlags flags,
                                   struct vn_renderer_bo **out_bo);

   bool (*destroy)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);

   int (*export_dma_buf)(struct vn_renderer *renderer,
                         struct vn_renderer_bo *bo);

   /* map is not thread-safe */
   void *(*map)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);

   void (*flush)(struct vn_renderer *renderer,
                 struct vn_renderer_bo *bo,
                 VkDeviceSize offset,
                 VkDeviceSize size);
   void (*invalidate)(struct vn_renderer *renderer,
                      struct vn_renderer_bo *bo,
                      VkDeviceSize offset,
                      VkDeviceSize size);
};

enum vn_renderer_sync_flags {
   VN_RENDERER_SYNC_SHAREABLE = 1u << 0,
   VN_RENDERER_SYNC_BINARY = 1u << 1,
};

struct vn_renderer_sync_ops {
   VkResult (*create)(struct vn_renderer *renderer,
                      uint64_t initial_val,
                      uint32_t flags,
                      struct vn_renderer_sync **out_sync);

   VkResult (*create_from_syncobj)(struct vn_renderer *renderer,
                                   int fd,
                                   bool sync_file,
                                   struct vn_renderer_sync **out_sync);
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_sync *sync);

   int (*export_syncobj)(struct vn_renderer *renderer,
                         struct vn_renderer_sync *sync,
                         bool sync_file);

   /* reset the counter */
   VkResult (*reset)(struct vn_renderer *renderer,
                     struct vn_renderer_sync *sync,
                     uint64_t initial_val);

   /* read the current value from the counter */
   VkResult (*read)(struct vn_renderer *renderer,
                    struct vn_renderer_sync *sync,
                    uint64_t *val);

   /* write a new value (larger than the current one) to the counter */
   VkResult (*write)(struct vn_renderer *renderer,
                     struct vn_renderer_sync *sync,
                     uint64_t val);
};

struct vn_renderer {
   struct vn_renderer_ops ops;
   struct vn_renderer_shmem_ops shmem_ops;
   struct vn_renderer_bo_ops bo_ops;
   struct vn_renderer_sync_ops sync_ops;
};

VkResult
vn_renderer_create_virtgpu(struct vn_instance *instance,
                           const VkAllocationCallbacks *alloc,
                           struct vn_renderer **renderer);

VkResult
vn_renderer_create_vtest(struct vn_instance *instance,
                         const VkAllocationCallbacks *alloc,
                         struct vn_renderer **renderer);

static inline VkResult
vn_renderer_create(struct vn_instance *instance,
                   const VkAllocationCallbacks *alloc,
                   struct vn_renderer **renderer)
{
   if (VN_DEBUG(VTEST)) {
      VkResult result = vn_renderer_create_vtest(instance, alloc, renderer);
      if (result == VK_SUCCESS)
         return VK_SUCCESS;
   }

   return vn_renderer_create_virtgpu(instance, alloc, renderer);
}

static inline void
vn_renderer_destroy(struct vn_renderer *renderer,
                    const VkAllocationCallbacks *alloc)
{
   renderer->ops.destroy(renderer, alloc);
}

static inline void
vn_renderer_get_info(struct vn_renderer *renderer,
                     struct vn_renderer_info *info)
{
   renderer->ops.get_info(renderer, info);
}

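/*
 * Illustrative usage sketch (hypothetical helper, not part of the upstream
 * API): how a caller might create a renderer, query its capabilities, and
 * tear it down using only the helpers declared above. The function name and
 * the particular capability checks are assumptions made for illustration.
 */
static inline VkResult
example_renderer_probe(struct vn_instance *instance,
                       const VkAllocationCallbacks *alloc,
                       struct vn_renderer **out_renderer)
{
   struct vn_renderer *renderer;
   /* picks vtest when VN_DEBUG(VTEST) is set, otherwise virtgpu */
   VkResult result = vn_renderer_create(instance, alloc, &renderer);
   if (result != VK_SUCCESS)
      return result;

   struct vn_renderer_info info;
   vn_renderer_get_info(renderer, &info);

   /* reject renderers that lack the capabilities this sketch assumes */
   if (!info.has_dma_buf_import || !info.max_sync_queue_count) {
      vn_renderer_destroy(renderer, alloc);
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   *out_renderer = renderer;
   return VK_SUCCESS;
}
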
static inline VkResult
vn_renderer_submit(struct vn_renderer *renderer,
                   const struct vn_renderer_submit *submit)
{
   return renderer->ops.submit(renderer, submit);
}

static inline VkResult
vn_renderer_submit_simple(struct vn_renderer *renderer,
                          const void *cs_data,
                          size_t cs_size)
{
   const struct vn_renderer_submit submit = {
      .batches =
         &(const struct vn_renderer_submit_batch){
            .cs_data = cs_data,
            .cs_size = cs_size,
         },
      .batch_count = 1,
   };
   return vn_renderer_submit(renderer, &submit);
}

static inline VkResult
vn_renderer_wait(struct vn_renderer *renderer,
                 const struct vn_renderer_wait *wait)
{
   return renderer->ops.wait(renderer, wait);
}

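/*
 * Illustrative usage sketch (hypothetical helper, not part of the upstream
 * API): submit a command stream to one virtual sync queue, ask the renderer
 * to bump a timeline-style sync to signal_val when the associated VkQueue
 * finishes, then block until the counter reaches that value. The function
 * name is an assumption; sync_queue_index must be less than
 * vn_renderer_info::max_sync_queue_count.
 */
static inline VkResult
example_submit_and_wait(struct vn_renderer *renderer,
                        const void *cs_data,
                        size_t cs_size,
                        uint32_t sync_queue_index,
                        vn_object_id vk_queue_id,
                        struct vn_renderer_sync *sync,
                        uint64_t signal_val)
{
   const struct vn_renderer_submit submit = {
      .batches =
         &(const struct vn_renderer_submit_batch){
            .cs_data = cs_data,
            .cs_size = cs_size,
            .sync_queue_index = sync_queue_index,
            .vk_queue_id = vk_queue_id,
            /* update this sync to signal_val when the queue is done */
            .syncs = &sync,
            .sync_values = &signal_val,
            .sync_count = 1,
         },
      .batch_count = 1,
   };
   VkResult result = vn_renderer_submit(renderer, &submit);
   if (result != VK_SUCCESS)
      return result;

   const struct vn_renderer_wait wait = {
      .timeout = UINT64_MAX, /* effectively no timeout */
      .syncs = &sync,
      .sync_values = &signal_val,
      .sync_count = 1,
   };
   /* VK_SUCCESS, VK_TIMEOUT, or VK_ERROR_DEVICE_LOST/out-of-memory */
   return vn_renderer_wait(renderer, &wait);
}
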
static inline struct vn_renderer_shmem *
vn_renderer_shmem_create(struct vn_renderer *renderer, size_t size)
{
   struct vn_renderer_shmem *shmem =
      renderer->shmem_ops.create(renderer, size);
   if (shmem) {
      assert(atomic_load(&shmem->refcount) == 1);
      assert(shmem->res_id);
      assert(shmem->mmap_size >= size);
      assert(shmem->mmap_ptr);
   }

   return shmem;
}

static inline struct vn_renderer_shmem *
vn_renderer_shmem_ref(struct vn_renderer *renderer,
                      struct vn_renderer_shmem *shmem)
{
   ASSERTED const int old =
      atomic_fetch_add_explicit(&shmem->refcount, 1, memory_order_relaxed);
   assert(old >= 1);

   return shmem;
}

static inline void
vn_renderer_shmem_unref(struct vn_renderer *renderer,
                        struct vn_renderer_shmem *shmem)
{
   const int old =
      atomic_fetch_sub_explicit(&shmem->refcount, 1, memory_order_release);
   assert(old >= 1);

   if (old == 1) {
      atomic_thread_fence(memory_order_acquire);
      renderer->shmem_ops.destroy(renderer, shmem);
   }
}

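/*
 * Illustrative usage sketch (hypothetical helper, not part of the upstream
 * API): the expected shmem lifecycle. A shmem is created with refcount 1
 * and is already CPU-mapped at mmap_ptr; additional owners take a ref, and
 * the last unref destroys it. The function name and the "second owner"
 * scenario are assumptions made for illustration.
 */
static inline void
example_shmem_lifecycle(struct vn_renderer *renderer, size_t size)
{
   struct vn_renderer_shmem *shmem = vn_renderer_shmem_create(renderer, size);
   if (!shmem)
      return;

   /* a second owner (e.g., an in-flight submission) holds its own ref */
   struct vn_renderer_shmem *borrowed = vn_renderer_shmem_ref(renderer, shmem);

   /* ... CPU writes go through shmem->mmap_ptr ... */

   /* the object is destroyed only when the last reference is dropped */
   vn_renderer_shmem_unref(renderer, borrowed);
   vn_renderer_shmem_unref(renderer, shmem);
}
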
static inline VkResult
vn_renderer_bo_create_from_device_memory(
   struct vn_renderer *renderer,
   VkDeviceSize size,
   vn_object_id mem_id,
   VkMemoryPropertyFlags flags,
   VkExternalMemoryHandleTypeFlags external_handles,
   struct vn_renderer_bo **out_bo)
{
   struct vn_renderer_bo *bo;
   VkResult result = renderer->bo_ops.create_from_device_memory(
      renderer, size, mem_id, flags, external_handles, &bo);
   if (result != VK_SUCCESS)
      return result;

   assert(atomic_load(&bo->refcount) == 1);
   assert(bo->res_id);
   assert(!bo->mmap_size || bo->mmap_size >= size);

   *out_bo = bo;
   return VK_SUCCESS;
}

static inline VkResult
vn_renderer_bo_create_from_dma_buf(struct vn_renderer *renderer,
                                   VkDeviceSize size,
                                   int fd,
                                   VkMemoryPropertyFlags flags,
                                   struct vn_renderer_bo **out_bo)
{
   struct vn_renderer_bo *bo;
   VkResult result =
      renderer->bo_ops.create_from_dma_buf(renderer, size, fd, flags, &bo);
   if (result != VK_SUCCESS)
      return result;

   assert(atomic_load(&bo->refcount) >= 1);
   assert(bo->res_id);
   assert(!bo->mmap_size || bo->mmap_size >= size);

   *out_bo = bo;
   return VK_SUCCESS;
}

static inline struct vn_renderer_bo *
vn_renderer_bo_ref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   ASSERTED const int old =
      atomic_fetch_add_explicit(&bo->refcount, 1, memory_order_relaxed);
   assert(old >= 1);

   return bo;
}

static inline bool
vn_renderer_bo_unref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   const int old =
      atomic_fetch_sub_explicit(&bo->refcount, 1, memory_order_release);
   assert(old >= 1);

   if (old == 1) {
      atomic_thread_fence(memory_order_acquire);
      return renderer->bo_ops.destroy(renderer, bo);
   }

   return false;
}

static inline int
vn_renderer_bo_export_dma_buf(struct vn_renderer *renderer,
                              struct vn_renderer_bo *bo)
{
   return renderer->bo_ops.export_dma_buf(renderer, bo);
}

static inline void *
vn_renderer_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   return renderer->bo_ops.map(renderer, bo);
}

static inline void
vn_renderer_bo_flush(struct vn_renderer *renderer,
                     struct vn_renderer_bo *bo,
                     VkDeviceSize offset,
                     VkDeviceSize end)
{
   renderer->bo_ops.flush(renderer, bo, offset, end);
}

static inline void
vn_renderer_bo_invalidate(struct vn_renderer *renderer,
                          struct vn_renderer_bo *bo,
                          VkDeviceSize offset,
                          VkDeviceSize size)
{
   renderer->bo_ops.invalidate(renderer, bo, offset, size);
}

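/*
 * Illustrative usage sketch (hypothetical helper, not part of the upstream
 * API): CPU access to a bo. The bo is mapped via vn_renderer_bo_map (which
 * is not thread-safe per bo_ops.map above), written through the returned
 * pointer, and flushed when the underlying memory is not host-coherent.
 * The function name and the single-byte write are assumptions.
 */
static inline VkResult
example_bo_write_byte(struct vn_renderer *renderer,
                      struct vn_renderer_bo *bo,
                      VkDeviceSize offset,
                      uint8_t value,
                      bool coherent)
{
   void *ptr = vn_renderer_bo_map(renderer, bo);
   if (!ptr)
      return VK_ERROR_MEMORY_MAP_FAILED;

   ((uint8_t *)ptr)[offset] = value;

   /* non-coherent memory must be flushed before the device can see the write */
   if (!coherent)
      vn_renderer_bo_flush(renderer, bo, offset, 1);

   return VK_SUCCESS;
}
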
static inline VkResult
vn_renderer_sync_create(struct vn_renderer *renderer,
                        uint64_t initial_val,
                        uint32_t flags,
                        struct vn_renderer_sync **out_sync)
{
   return renderer->sync_ops.create(renderer, initial_val, flags, out_sync);
}

static inline VkResult
vn_renderer_sync_create_from_syncobj(struct vn_renderer *renderer,
                                     int fd,
                                     bool sync_file,
                                     struct vn_renderer_sync **out_sync)
{
   return renderer->sync_ops.create_from_syncobj(renderer, fd, sync_file,
                                                 out_sync);
}

static inline void
vn_renderer_sync_destroy(struct vn_renderer *renderer,
                         struct vn_renderer_sync *sync)
{
   renderer->sync_ops.destroy(renderer, sync);
}

static inline int
vn_renderer_sync_export_syncobj(struct vn_renderer *renderer,
                                struct vn_renderer_sync *sync,
                                bool sync_file)
{
   return renderer->sync_ops.export_syncobj(renderer, sync, sync_file);
}

static inline VkResult
vn_renderer_sync_reset(struct vn_renderer *renderer,
                       struct vn_renderer_sync *sync,
                       uint64_t initial_val)
{
   return renderer->sync_ops.reset(renderer, sync, initial_val);
}

static inline VkResult
vn_renderer_sync_read(struct vn_renderer *renderer,
                      struct vn_renderer_sync *sync,
                      uint64_t *val)
{
   return renderer->sync_ops.read(renderer, sync, val);
}

static inline VkResult
vn_renderer_sync_write(struct vn_renderer *renderer,
                       struct vn_renderer_sync *sync,
                       uint64_t val)
{
   return renderer->sync_ops.write(renderer, sync, val);
}

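/*
 * Illustrative usage sketch (hypothetical helper, not part of the upstream
 * API): the timeline-like counter model described above struct
 * vn_renderer_sync. A sync starts at initial_val, the CPU advances the
 * counter to a larger value, and readers observe the current value. The
 * function name and the specific values are assumptions.
 */
static inline VkResult
example_sync_cpu_signal(struct vn_renderer *renderer)
{
   struct vn_renderer_sync *sync;
   VkResult result =
      vn_renderer_sync_create(renderer, 0 /* initial_val */, 0 /* flags */,
                              &sync);
   if (result != VK_SUCCESS)
      return result;

   /* advance the counter from the CPU; the new value must be larger */
   result = vn_renderer_sync_write(renderer, sync, 1);

   uint64_t val = 0;
   if (result == VK_SUCCESS)
      result = vn_renderer_sync_read(renderer, sync, &val);
   assert(result != VK_SUCCESS || val >= 1);

   vn_renderer_sync_destroy(renderer, sync);
   return result;
}
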
#endif /* VN_RENDERER_H */