Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/virtio/vulkan/vn_device_memory.c
4560 views
1
/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */
10
11
#include "vn_device_memory.h"
12
13
#include "venus-protocol/vn_protocol_driver_device_memory.h"
14
#include "venus-protocol/vn_protocol_driver_transport.h"
15
16
#include "vn_android.h"
17
#include "vn_buffer.h"
18
#include "vn_device.h"
19
#include "vn_image.h"
20
21
/* device memory commands */
22
23
static VkResult
24
vn_device_memory_simple_alloc(struct vn_device *dev,
25
uint32_t mem_type_index,
26
VkDeviceSize size,
27
struct vn_device_memory **out_mem)
28
{
29
const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
30
31
struct vn_device_memory *mem =
32
vk_zalloc(alloc, sizeof(*mem), VN_DEFAULT_ALIGN,
33
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
34
if (!mem)
35
return VK_ERROR_OUT_OF_HOST_MEMORY;
36
37
vn_object_base_init(&mem->base, VK_OBJECT_TYPE_DEVICE_MEMORY, &dev->base);
38
mem->size = size;
39
40
VkDeviceMemory mem_handle = vn_device_memory_to_handle(mem);
41
VkResult result = vn_call_vkAllocateMemory(
42
dev->instance, vn_device_to_handle(dev),
43
&(const VkMemoryAllocateInfo){
44
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
45
.allocationSize = size,
46
.memoryTypeIndex = mem_type_index,
47
},
48
NULL, &mem_handle);
49
if (result != VK_SUCCESS) {
50
vk_free(alloc, mem);
51
return result;
52
}
53
54
const VkPhysicalDeviceMemoryProperties *mem_props =
55
&dev->physical_device->memory_properties.memoryProperties;
56
const VkMemoryType *mem_type = &mem_props->memoryTypes[mem_type_index];
57
result = vn_renderer_bo_create_from_device_memory(
58
dev->renderer, mem->size, mem->base.id, mem_type->propertyFlags, 0,
59
&mem->base_bo);
60
if (result != VK_SUCCESS) {
61
vn_async_vkFreeMemory(dev->instance, vn_device_to_handle(dev),
62
mem_handle, NULL);
63
vk_free(alloc, mem);
64
return result;
65
}
66
vn_instance_roundtrip(dev->instance);
67
68
*out_mem = mem;
69
70
return VK_SUCCESS;
71
}
72
73
static void
74
vn_device_memory_simple_free(struct vn_device *dev,
75
struct vn_device_memory *mem)
76
{
77
const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
78
79
if (mem->base_bo)
80
vn_renderer_bo_unref(dev->renderer, mem->base_bo);
81
82
vn_async_vkFreeMemory(dev->instance, vn_device_to_handle(dev),
83
vn_device_memory_to_handle(mem), NULL);
84
vn_object_base_fini(&mem->base);
85
vk_free(alloc, mem);
86
}
87
88
void
89
vn_device_memory_pool_fini(struct vn_device *dev, uint32_t mem_type_index)
90
{
91
struct vn_device_memory_pool *pool = &dev->memory_pools[mem_type_index];
92
if (pool->memory)
93
vn_device_memory_simple_free(dev, pool->memory);
94
mtx_destroy(&pool->mutex);
95
}
96
97
/* Replace the pool's backing memory with a freshly allocated one of the
 * given size.  Must be called with pool->mutex held.
 *
 * The old backing memory is not freed unconditionally: every outstanding
 * suballocation holds a ref on its base_bo, so the old memory is freed here
 * only when dropping the pool's own bo ref destroys the bo (i.e. no
 * suballocation from it is still alive).  Otherwise the last
 * vn_device_memory_pool_free call frees it.
 */
static VkResult
vn_device_memory_pool_grow_locked(struct vn_device *dev,
                                  uint32_t mem_type_index,
                                  VkDeviceSize size)
{
   struct vn_device_memory *mem;
   VkResult result =
      vn_device_memory_simple_alloc(dev, mem_type_index, size, &mem);
   if (result != VK_SUCCESS)
      return result;

   struct vn_device_memory_pool *pool = &dev->memory_pools[mem_type_index];
   if (pool->memory) {
      /* drop the pool's ref; clear base_bo so a later simple_free (from
       * pool_free) will not unref it again
       */
      const bool bo_destroyed =
         vn_renderer_bo_unref(dev->renderer, pool->memory->base_bo);
      pool->memory->base_bo = NULL;

      /* we use pool->memory's base_bo to keep it alive */
      if (bo_destroyed)
         vn_device_memory_simple_free(dev, pool->memory);
   }

   pool->memory = mem;
   pool->used = 0;

   return VK_SUCCESS;
}
124
125
static VkResult
126
vn_device_memory_pool_alloc(struct vn_device *dev,
127
uint32_t mem_type_index,
128
VkDeviceSize size,
129
struct vn_device_memory **base_mem,
130
struct vn_renderer_bo **base_bo,
131
VkDeviceSize *base_offset)
132
{
133
const VkDeviceSize pool_size = 16 * 1024 * 1024;
134
/* XXX We don't know the alignment requirement. We should probably use 64K
135
* because some GPUs have 64K pages.
136
*/
137
const VkDeviceSize pool_align = 4096;
138
struct vn_device_memory_pool *pool = &dev->memory_pools[mem_type_index];
139
140
assert(size <= pool_size);
141
142
mtx_lock(&pool->mutex);
143
144
if (!pool->memory || pool->used + size > pool_size) {
145
VkResult result =
146
vn_device_memory_pool_grow_locked(dev, mem_type_index, pool_size);
147
if (result != VK_SUCCESS) {
148
mtx_unlock(&pool->mutex);
149
return result;
150
}
151
}
152
153
/* we use base_bo to keep base_mem alive */
154
*base_mem = pool->memory;
155
*base_bo = vn_renderer_bo_ref(dev->renderer, pool->memory->base_bo);
156
157
*base_offset = pool->used;
158
pool->used += align64(size, pool_align);
159
160
mtx_unlock(&pool->mutex);
161
162
return VK_SUCCESS;
163
}
164
165
static void
166
vn_device_memory_pool_free(struct vn_device *dev,
167
struct vn_device_memory *base_mem,
168
struct vn_renderer_bo *base_bo)
169
{
170
/* we use base_bo to keep base_mem alive */
171
if (vn_renderer_bo_unref(dev->renderer, base_bo))
172
vn_device_memory_simple_free(dev, base_mem);
173
}
174
175
static bool
176
vn_device_memory_should_suballocate(const VkMemoryAllocateInfo *alloc_info,
177
const VkMemoryType *mem_type)
178
{
179
/* We should not support suballocations because apps can do better. But
180
* each BO takes up a KVM memslot currently and some CTS tests exhausts
181
* them. This might not be needed on newer (host) kernels where there are
182
* many more KVM memslots.
183
*/
184
185
/* consider host-visible memory only */
186
if (!(mem_type->propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
187
return false;
188
189
/* reject larger allocations */
190
if (alloc_info->allocationSize > 64 * 1024)
191
return false;
192
193
/* reject if there is any pnext struct other than
194
* VkMemoryDedicatedAllocateInfo, or if dedicated allocation is required
195
*/
196
if (alloc_info->pNext) {
197
const VkMemoryDedicatedAllocateInfo *dedicated = alloc_info->pNext;
198
if (dedicated->sType !=
199
VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO ||
200
dedicated->pNext)
201
return false;
202
203
const struct vn_image *img = vn_image_from_handle(dedicated->image);
204
if (img) {
205
for (uint32_t i = 0; i < ARRAY_SIZE(img->dedicated_requirements);
206
i++) {
207
if (img->dedicated_requirements[i].requiresDedicatedAllocation)
208
return false;
209
}
210
}
211
212
const struct vn_buffer *buf = vn_buffer_from_handle(dedicated->buffer);
213
if (buf && buf->dedicated_requirements.requiresDedicatedAllocation)
214
return false;
215
}
216
217
return true;
218
}
219
220
/* Import a dma-buf fd as the backing of `mem`.
 *
 * On success the imported renderer bo is stored in mem->base_bo and the fd
 * is consumed (closed below).  On failure the fd is left open and remains
 * owned by the caller.  NOTE(review): this assumes
 * vn_renderer_bo_create_from_dma_buf does not itself take ownership of the
 * fd — consistent with the explicit close() below, but confirm against the
 * renderer implementation.
 */
VkResult
vn_device_memory_import_dma_buf(struct vn_device *dev,
                                struct vn_device_memory *mem,
                                const VkMemoryAllocateInfo *alloc_info,
                                int fd)
{
   VkDevice device = vn_device_to_handle(dev);
   VkDeviceMemory memory = vn_device_memory_to_handle(mem);
   const VkPhysicalDeviceMemoryProperties *mem_props =
      &dev->physical_device->memory_properties.memoryProperties;
   const VkMemoryType *mem_type =
      &mem_props->memoryTypes[alloc_info->memoryTypeIndex];
   struct vn_renderer_bo *bo;
   VkResult result = VK_SUCCESS;

   result = vn_renderer_bo_create_from_dma_buf(dev->renderer,
                                               alloc_info->allocationSize, fd,
                                               mem_type->propertyFlags, &bo);
   if (result != VK_SUCCESS)
      return result;

   /* make sure the renderer has processed the bo creation before we refer
    * to its res_id below
    */
   vn_instance_roundtrip(dev->instance);

   /* XXX fix VkImportMemoryResourceInfoMESA to support memory planes */
   const VkImportMemoryResourceInfoMESA import_memory_resource_info = {
      .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_RESOURCE_INFO_MESA,
      .pNext = alloc_info->pNext,
      .resourceId = bo->res_id,
   };
   const VkMemoryAllocateInfo memory_allocate_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &import_memory_resource_info,
      .allocationSize = alloc_info->allocationSize,
      .memoryTypeIndex = alloc_info->memoryTypeIndex,
   };
   result = vn_call_vkAllocateMemory(dev->instance, device,
                                     &memory_allocate_info, NULL, &memory);
   if (result != VK_SUCCESS) {
      vn_renderer_bo_unref(dev->renderer, bo);
      return result;
   }

   /* need to close import fd on success to avoid fd leak */
   close(fd);
   mem->base_bo = bo;

   return VK_SUCCESS;
}
268
269
static VkResult
270
vn_device_memory_alloc(struct vn_device *dev,
271
struct vn_device_memory *mem,
272
const VkMemoryAllocateInfo *alloc_info,
273
bool need_bo,
274
VkMemoryPropertyFlags flags,
275
VkExternalMemoryHandleTypeFlags external_handles)
276
{
277
VkDevice dev_handle = vn_device_to_handle(dev);
278
VkDeviceMemory mem_handle = vn_device_memory_to_handle(mem);
279
VkResult result = vn_call_vkAllocateMemory(dev->instance, dev_handle,
280
alloc_info, NULL, &mem_handle);
281
if (result != VK_SUCCESS || !need_bo)
282
return result;
283
284
result = vn_renderer_bo_create_from_device_memory(
285
dev->renderer, mem->size, mem->base.id, flags, external_handles,
286
&mem->base_bo);
287
if (result != VK_SUCCESS) {
288
vn_async_vkFreeMemory(dev->instance, dev_handle, mem_handle, NULL);
289
return result;
290
}
291
292
vn_instance_roundtrip(dev->instance);
293
294
return VK_SUCCESS;
295
}
296
297
VkResult
298
vn_AllocateMemory(VkDevice device,
299
const VkMemoryAllocateInfo *pAllocateInfo,
300
const VkAllocationCallbacks *pAllocator,
301
VkDeviceMemory *pMemory)
302
{
303
struct vn_device *dev = vn_device_from_handle(device);
304
const VkAllocationCallbacks *alloc =
305
pAllocator ? pAllocator : &dev->base.base.alloc;
306
307
const VkPhysicalDeviceMemoryProperties *mem_props =
308
&dev->physical_device->memory_properties.memoryProperties;
309
const VkMemoryType *mem_type =
310
&mem_props->memoryTypes[pAllocateInfo->memoryTypeIndex];
311
312
const VkExportMemoryAllocateInfo *export_info = NULL;
313
const VkImportAndroidHardwareBufferInfoANDROID *import_ahb_info = NULL;
314
const VkImportMemoryFdInfoKHR *import_fd_info = NULL;
315
bool export_ahb = false;
316
317
vk_foreach_struct_const(pnext, pAllocateInfo->pNext) {
318
switch (pnext->sType) {
319
case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
320
export_info = (void *)pnext;
321
if (export_info->handleTypes &
322
VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)
323
export_ahb = true;
324
else if (!export_info->handleTypes)
325
export_info = NULL;
326
break;
327
case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
328
import_ahb_info = (void *)pnext;
329
break;
330
case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
331
import_fd_info = (void *)pnext;
332
break;
333
default:
334
break;
335
}
336
}
337
338
struct vn_device_memory *mem =
339
vk_zalloc(alloc, sizeof(*mem), VN_DEFAULT_ALIGN,
340
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
341
if (!mem)
342
return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
343
344
vn_object_base_init(&mem->base, VK_OBJECT_TYPE_DEVICE_MEMORY, &dev->base);
345
mem->size = pAllocateInfo->allocationSize;
346
347
VkDeviceMemory mem_handle = vn_device_memory_to_handle(mem);
348
VkResult result;
349
if (import_ahb_info) {
350
result = vn_android_device_import_ahb(dev, mem, pAllocateInfo, alloc,
351
import_ahb_info->buffer);
352
} else if (export_ahb) {
353
result = vn_android_device_allocate_ahb(dev, mem, pAllocateInfo, alloc);
354
} else if (import_fd_info) {
355
result = vn_device_memory_import_dma_buf(dev, mem, pAllocateInfo,
356
import_fd_info->fd);
357
} else if (export_info) {
358
result = vn_device_memory_alloc(dev, mem, pAllocateInfo, true,
359
mem_type->propertyFlags,
360
export_info->handleTypes);
361
} else if (vn_device_memory_should_suballocate(pAllocateInfo, mem_type)) {
362
result = vn_device_memory_pool_alloc(
363
dev, pAllocateInfo->memoryTypeIndex, mem->size, &mem->base_memory,
364
&mem->base_bo, &mem->base_offset);
365
} else {
366
const bool need_bo =
367
mem_type->propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
368
result = vn_device_memory_alloc(dev, mem, pAllocateInfo, need_bo,
369
mem_type->propertyFlags, 0);
370
}
371
if (result != VK_SUCCESS) {
372
vk_free(alloc, mem);
373
return vn_error(dev->instance, result);
374
}
375
376
*pMemory = mem_handle;
377
378
return VK_SUCCESS;
379
}
380
381
void
382
vn_FreeMemory(VkDevice device,
383
VkDeviceMemory memory,
384
const VkAllocationCallbacks *pAllocator)
385
{
386
struct vn_device *dev = vn_device_from_handle(device);
387
struct vn_device_memory *mem = vn_device_memory_from_handle(memory);
388
const VkAllocationCallbacks *alloc =
389
pAllocator ? pAllocator : &dev->base.base.alloc;
390
391
if (!mem)
392
return;
393
394
if (mem->base_memory) {
395
vn_device_memory_pool_free(dev, mem->base_memory, mem->base_bo);
396
} else {
397
if (mem->base_bo)
398
vn_renderer_bo_unref(dev->renderer, mem->base_bo);
399
vn_async_vkFreeMemory(dev->instance, device, memory, NULL);
400
}
401
402
if (mem->ahb)
403
vn_android_release_ahb(mem->ahb);
404
405
vn_object_base_fini(&mem->base);
406
vk_free(alloc, mem);
407
}
408
409
uint64_t
410
vn_GetDeviceMemoryOpaqueCaptureAddress(
411
VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo)
412
{
413
struct vn_device *dev = vn_device_from_handle(device);
414
ASSERTED struct vn_device_memory *mem =
415
vn_device_memory_from_handle(pInfo->memory);
416
417
assert(!mem->base_memory);
418
return vn_call_vkGetDeviceMemoryOpaqueCaptureAddress(dev->instance, device,
419
pInfo);
420
}
421
422
/* vkMapMemory: map the renderer bo and return a pointer into it.
 *
 * base_offset is non-zero for pool suballocations, so the returned pointer
 * is bo_map + base_offset + offset.  The mapped-range end is recorded for
 * later VK_WHOLE_SIZE flush/invalidate calls.
 */
VkResult
vn_MapMemory(VkDevice device,
             VkDeviceMemory memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_device_memory *mem = vn_device_memory_from_handle(memory);

   /* NOTE(review): base_bo is assumed non-NULL here; the allocation paths
    * only create a bo for host-visible memory, which is what valid usage
    * requires for mapping — confirm callers cannot reach here otherwise
    */
   void *ptr = vn_renderer_bo_map(dev->renderer, mem->base_bo);
   if (!ptr)
      return vn_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   /* remember where the mapped range ends, for VK_WHOLE_SIZE ranges in
    * vn_FlushMappedMemoryRanges / vn_InvalidateMappedMemoryRanges
    */
   mem->map_end = size == VK_WHOLE_SIZE ? mem->size : offset + size;

   *ppData = ptr + mem->base_offset + offset;

   return VK_SUCCESS;
}
443
444
void
vn_UnmapMemory(VkDevice device, VkDeviceMemory memory)
{
   /* nothing to do: vn_MapMemory maps through vn_renderer_bo_map, and the
    * bo mapping appears to persist for the bo's lifetime — there is no
    * per-map state to undo here
    */
}
448
449
VkResult
450
vn_FlushMappedMemoryRanges(VkDevice device,
451
uint32_t memoryRangeCount,
452
const VkMappedMemoryRange *pMemoryRanges)
453
{
454
struct vn_device *dev = vn_device_from_handle(device);
455
456
for (uint32_t i = 0; i < memoryRangeCount; i++) {
457
const VkMappedMemoryRange *range = &pMemoryRanges[i];
458
struct vn_device_memory *mem =
459
vn_device_memory_from_handle(range->memory);
460
461
const VkDeviceSize size = range->size == VK_WHOLE_SIZE
462
? mem->map_end - range->offset
463
: range->size;
464
vn_renderer_bo_flush(dev->renderer, mem->base_bo,
465
mem->base_offset + range->offset, size);
466
}
467
468
return VK_SUCCESS;
469
}
470
471
VkResult
472
vn_InvalidateMappedMemoryRanges(VkDevice device,
473
uint32_t memoryRangeCount,
474
const VkMappedMemoryRange *pMemoryRanges)
475
{
476
struct vn_device *dev = vn_device_from_handle(device);
477
478
for (uint32_t i = 0; i < memoryRangeCount; i++) {
479
const VkMappedMemoryRange *range = &pMemoryRanges[i];
480
struct vn_device_memory *mem =
481
vn_device_memory_from_handle(range->memory);
482
483
const VkDeviceSize size = range->size == VK_WHOLE_SIZE
484
? mem->map_end - range->offset
485
: range->size;
486
vn_renderer_bo_invalidate(dev->renderer, mem->base_bo,
487
mem->base_offset + range->offset, size);
488
}
489
490
return VK_SUCCESS;
491
}
492
493
void
494
vn_GetDeviceMemoryCommitment(VkDevice device,
495
VkDeviceMemory memory,
496
VkDeviceSize *pCommittedMemoryInBytes)
497
{
498
struct vn_device *dev = vn_device_from_handle(device);
499
ASSERTED struct vn_device_memory *mem =
500
vn_device_memory_from_handle(memory);
501
502
assert(!mem->base_memory);
503
vn_call_vkGetDeviceMemoryCommitment(dev->instance, device, memory,
504
pCommittedMemoryInBytes);
505
}
506
507
VkResult
508
vn_GetMemoryFdKHR(VkDevice device,
509
const VkMemoryGetFdInfoKHR *pGetFdInfo,
510
int *pFd)
511
{
512
struct vn_device *dev = vn_device_from_handle(device);
513
struct vn_device_memory *mem =
514
vn_device_memory_from_handle(pGetFdInfo->memory);
515
516
/* At the moment, we support only the below handle types. */
517
assert(pGetFdInfo->handleType &
518
(VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
519
VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT));
520
assert(!mem->base_memory && mem->base_bo);
521
*pFd = vn_renderer_bo_export_dma_buf(dev->renderer, mem->base_bo);
522
if (*pFd < 0)
523
return vn_error(dev->instance, VK_ERROR_TOO_MANY_OBJECTS);
524
525
return VK_SUCCESS;
526
}
527
528
/* Query the renderer-side allocation size and supported memory types of a
 * dma-buf.
 *
 * The dma-buf is imported into a temporary renderer bo only for the
 * duration of the query.  NOTE(review): the fd is not closed here —
 * assumed not consumed by vn_renderer_bo_create_from_dma_buf; confirm
 * against the renderer implementation.
 */
VkResult
vn_get_memory_dma_buf_properties(struct vn_device *dev,
                                 int fd,
                                 uint64_t *out_alloc_size,
                                 uint32_t *out_mem_type_bits)
{
   VkDevice device = vn_device_to_handle(dev);
   struct vn_renderer_bo *bo = NULL;
   VkResult result = VK_SUCCESS;

   result = vn_renderer_bo_create_from_dma_buf(dev->renderer, 0 /* size */,
                                               fd, 0 /* flags */, &bo);
   if (result != VK_SUCCESS)
      return result;

   /* make sure the renderer has processed the bo creation before we refer
    * to its res_id below
    */
   vn_instance_roundtrip(dev->instance);

   VkMemoryResourceAllocationSizeProperties100000MESA alloc_size_props = {
      .sType =
         VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_100000_MESA,
      .pNext = NULL,
      .allocationSize = 0,
   };
   /* chain the allocation-size query only when the renderer advertises the
    * experimental memoryResourceAllocationSize support; otherwise
    * alloc_size_props stays zero
    */
   VkMemoryResourcePropertiesMESA props = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_RESOURCE_PROPERTIES_MESA,
      .pNext =
         dev->instance->experimental.memoryResourceAllocationSize == VK_TRUE
            ? &alloc_size_props
            : NULL,
      .memoryTypeBits = 0,
   };
   result = vn_call_vkGetMemoryResourcePropertiesMESA(dev->instance, device,
                                                      bo->res_id, &props);
   vn_renderer_bo_unref(dev->renderer, bo);
   if (result != VK_SUCCESS)
      return result;

   *out_alloc_size = alloc_size_props.allocationSize;
   *out_mem_type_bits = props.memoryTypeBits;

   return VK_SUCCESS;
}
570
571
VkResult
572
vn_GetMemoryFdPropertiesKHR(VkDevice device,
573
VkExternalMemoryHandleTypeFlagBits handleType,
574
int fd,
575
VkMemoryFdPropertiesKHR *pMemoryFdProperties)
576
{
577
struct vn_device *dev = vn_device_from_handle(device);
578
uint64_t alloc_size = 0;
579
uint32_t mem_type_bits = 0;
580
VkResult result = VK_SUCCESS;
581
582
if (handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)
583
return vn_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
584
585
result =
586
vn_get_memory_dma_buf_properties(dev, fd, &alloc_size, &mem_type_bits);
587
if (result != VK_SUCCESS)
588
return vn_error(dev->instance, result);
589
590
pMemoryFdProperties->memoryTypeBits = mem_type_bits;
591
592
return VK_SUCCESS;
593
}
594
595