#include <cstring>
#include <cassert>
#include <vector>
#include <mutex>
#include <unordered_map>
#include <condition_variable>

#include "Common/GPU/Vulkan/VulkanLoader.h"
#include "Common/Log.h"
#include "Core/Config.h"

#define VK_NO_PROTOTYPES
#include <libretro_vulkan.h>
#include "libretro/LibretroGraphicsContext.h"

using namespace PPSSPP_VK;

static retro_hw_render_interface_vulkan *vulkan;

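// Initialization parameters handed to us by the libretro frontend via vk_libretro_init(),
// consumed by the warped create/destroy functions below.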
static struct {
	VkInstance instance;
	VkPhysicalDevice gpu;
	VkSurfaceKHR surface;
	PFN_vkGetInstanceProcAddr get_instance_proc_addr;
	const char **required_device_extensions;
	unsigned num_required_device_extensions;
	const char **required_device_layers;
	unsigned num_required_device_layers;
	const VkPhysicalDeviceFeatures *required_features;
} vk_init_info;
static bool DEDICATED_ALLOCATION;

#define VULKAN_MAX_SWAPCHAIN_IMAGES 8
struct VkSwapchainKHR_T {
	uint32_t count;
	struct {
		VkImage handle;
		VkDeviceMemory memory;
		retro_vulkan_image retro_image;
	} images[VULKAN_MAX_SWAPCHAIN_IMAGES];
	std::mutex mutex;
	std::condition_variable condVar;
	int current_index;
};
static VkSwapchainKHR_T chain;

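// The Vulkan entry points we intercept ("warp"). For each function x, the real pointer is
// stashed in x_org and callers are handed x_libretro instead (see vkGetInstanceProcAddr_libretro).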
#define LIBRETRO_VK_WARP_LIST() \
	LIBRETRO_VK_WARP_FUNC(vkCreateInstance); \
	LIBRETRO_VK_WARP_FUNC(vkDestroyInstance); \
	LIBRETRO_VK_WARP_FUNC(vkCreateDevice); \
	LIBRETRO_VK_WARP_FUNC(vkDestroyDevice); \
	LIBRETRO_VK_WARP_FUNC(vkGetPhysicalDeviceSurfaceCapabilitiesKHR); \
	LIBRETRO_VK_WARP_FUNC(vkDestroySurfaceKHR); \
	LIBRETRO_VK_WARP_FUNC(vkCreateSwapchainKHR); \
	LIBRETRO_VK_WARP_FUNC(vkGetSwapchainImagesKHR); \
	LIBRETRO_VK_WARP_FUNC(vkAcquireNextImageKHR); \
	LIBRETRO_VK_WARP_FUNC(vkQueuePresentKHR); \
	LIBRETRO_VK_WARP_FUNC(vkDestroySwapchainKHR); \
	LIBRETRO_VK_WARP_FUNC(vkQueueSubmit); \
	LIBRETRO_VK_WARP_FUNC(vkQueueWaitIdle); \
	LIBRETRO_VK_WARP_FUNC(vkCmdPipelineBarrier); \
	LIBRETRO_VK_WARP_FUNC(vkCreateRenderPass);

#define LIBRETRO_VK_WARP_FUNC(x) \
	PFN_##x x##_org

LIBRETRO_VK_WARP_FUNC(vkGetInstanceProcAddr);
LIBRETRO_VK_WARP_FUNC(vkGetDeviceProcAddr);
LIBRETRO_VK_WARP_LIST();

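// The frontend owns the VkInstance, so "creating" one simply returns the instance it supplied.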
static VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance_libretro(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
	*pInstance = vk_init_info.instance;
	return VK_SUCCESS;
}

static void add_name_unique(std::vector<const char *> &list, const char *value) {
	for (const char *name : list) {
		if (!strcmp(value, name))
			return;
	}

	list.push_back(value);
}

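// Wraps vkCreateDevice to merge the frontend's required layers, extensions and physical-device
// features into the core's create info, then restores any application memory it touched.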
static VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice_libretro(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
	VkDeviceCreateInfo newInfo = *pCreateInfo;

	// Add our custom layers.
	std::vector<const char *> enabledLayerNames(pCreateInfo->ppEnabledLayerNames, pCreateInfo->ppEnabledLayerNames + pCreateInfo->enabledLayerCount);

	for (uint32_t i = 0; i < vk_init_info.num_required_device_layers; i++) {
		add_name_unique(enabledLayerNames, vk_init_info.required_device_layers[i]);
	}

	newInfo.enabledLayerCount = (uint32_t)enabledLayerNames.size();
	newInfo.ppEnabledLayerNames = newInfo.enabledLayerCount ? enabledLayerNames.data() : nullptr;

	// Add our custom extensions.
	std::vector<const char *> enabledExtensionNames(pCreateInfo->ppEnabledExtensionNames, pCreateInfo->ppEnabledExtensionNames + pCreateInfo->enabledExtensionCount);

	for (uint32_t i = 0; i < vk_init_info.num_required_device_extensions; i++) {
		add_name_unique(enabledExtensionNames, vk_init_info.required_device_extensions[i]);
	}

	for (const char *extensionName : enabledExtensionNames) {
		if (!strcmp(extensionName, VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME))
			DEDICATED_ALLOCATION = true;
	}

	newInfo.enabledExtensionCount = (uint32_t)enabledExtensionNames.size();
	newInfo.ppEnabledExtensionNames = newInfo.enabledExtensionCount ? enabledExtensionNames.data() : nullptr;

	// Then check for VkPhysicalDeviceFeatures2 chaining or pEnabledFeatures to enable required features. Note that when both
	// structs are present, Features2 takes precedence. vkCreateDevice parameters don't give us a simple way to detect
	// VK_KHR_get_physical_device_properties2 usage, so we always try both paths.
	std::unordered_map<VkPhysicalDeviceFeatures *, VkPhysicalDeviceFeatures> originalFeaturePointers;
	VkPhysicalDeviceFeatures placeholderEnabledFeatures{};

	for (const VkBaseOutStructure *next = (const VkBaseOutStructure *)pCreateInfo->pNext; next != nullptr;) {
		if (next->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2) {
			VkPhysicalDeviceFeatures *enabledFeatures = &((VkPhysicalDeviceFeatures2 *)next)->features;
			originalFeaturePointers.try_emplace(enabledFeatures, *enabledFeatures);
		}

		next = (const VkBaseOutStructure *)next->pNext;
	}

	if (newInfo.pEnabledFeatures) {
		placeholderEnabledFeatures = *newInfo.pEnabledFeatures;
	}

	newInfo.pEnabledFeatures = &placeholderEnabledFeatures;
	originalFeaturePointers.try_emplace((VkPhysicalDeviceFeatures *)newInfo.pEnabledFeatures, *newInfo.pEnabledFeatures);

	// VkPhysicalDeviceFeatures is just a struct of VkBool32s, so treat each tracked struct as
	// an array of VkBool32 and switch on every feature the frontend requires.
	for (const auto &pair : originalFeaturePointers) {
		for (uint32_t i = 0; i < sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32); i++) {
			if (((VkBool32 *)vk_init_info.required_features)[i])
				((VkBool32 *)pair.first)[i] = VK_TRUE;
		}
	}

	VkResult res = vkCreateDevice_org(physicalDevice, &newInfo, pAllocator, pDevice);

	// The above code potentially modifies application memory. Restore it to avoid unexpected side effects.
	for (const auto &pair : originalFeaturePointers) {
		*pair.first = pair.second;
	}

	return res;
}

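// Likewise, the frontend owns the VkSurfaceKHR; every platform-specific surface-creation
// entry point is redirected here (see vkGetInstanceProcAddr_libretro below).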
static VKAPI_ATTR VkResult VKAPI_CALL vkCreateLibretroSurfaceKHR(VkInstance instance, const void *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
	*pSurface = vk_init_info.surface;
	return VK_SUCCESS;
}

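// Clamps the reported surface extents to the emulated framebuffer size so the swapchain is
// always created at the PSP render resolution.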
VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR_libretro(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
	VkResult res = vkGetPhysicalDeviceSurfaceCapabilitiesKHR_org(physicalDevice, surface, pSurfaceCapabilities);
	if (res == VK_SUCCESS) {
		int w = g_Config.iInternalResolution * NATIVEWIDTH;
		int h = g_Config.iInternalResolution * NATIVEHEIGHT;

		// Cropping 480x272 to 16:9 drops two lines per internal-resolution step (272 -> 270 at 1x).
		if (g_Config.bDisplayCropTo16x9)
			h -= g_Config.iInternalResolution * 2;

		pSurfaceCapabilities->minImageExtent.width = w;
		pSurfaceCapabilities->minImageExtent.height = h;
		pSurfaceCapabilities->maxImageExtent.width = w;
		pSurfaceCapabilities->maxImageExtent.height = h;
		pSurfaceCapabilities->currentExtent.width = w;
		pSurfaceCapabilities->currentExtent.height = h;
	}
	return res;
}

static bool MemoryTypeFromProperties(uint32_t typeBits, VkFlags requirements_mask, uint32_t *typeIndex) {
	VkPhysicalDeviceMemoryProperties memory_properties;
	vkGetPhysicalDeviceMemoryProperties(vulkan->gpu, &memory_properties);
	// Search memtypes to find first index with those properties
	for (uint32_t i = 0; i < 32; i++) {
		if ((typeBits & 1) == 1) {
			// Type is available, does it match user properties?
			if ((memory_properties.memoryTypes[i].propertyFlags & requirements_mask) == requirements_mask) {
				*typeIndex = i;
				return true;
			}
		}
		typeBits >>= 1;
	}
	// No memory types matched, return failure
	return false;
}

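// There is no real swapchain: presentation goes through the frontend, so we allocate one
// backing image per frontend sync index and hand those out instead.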
static VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR_libretro(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
	uint32_t swapchain_mask = vulkan->get_sync_index_mask(vulkan->handle);

	// The image count is the position of the highest set bit in the sync-index mask, plus one.
	chain.count = 0;
	while (swapchain_mask) {
		chain.count++;
		swapchain_mask >>= 1;
	}
	assert(chain.count <= VULKAN_MAX_SWAPCHAIN_IMAGES);

	for (uint32_t i = 0; i < chain.count; i++) {
		{
			VkImageCreateInfo info{ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
			info.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
			info.imageType = VK_IMAGE_TYPE_2D;
			info.format = pCreateInfo->imageFormat;
			info.extent.width = pCreateInfo->imageExtent.width;
			info.extent.height = pCreateInfo->imageExtent.height;
			info.extent.depth = 1;
			info.mipLevels = 1;
			info.arrayLayers = 1;
			info.samples = VK_SAMPLE_COUNT_1_BIT;
			info.tiling = VK_IMAGE_TILING_OPTIMAL;
			info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
			info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

			vkCreateImage(device, &info, pAllocator, &chain.images[i].handle);
		}

		VkMemoryRequirements memreq;
		vkGetImageMemoryRequirements(device, chain.images[i].handle, &memreq);

		VkMemoryAllocateInfo alloc{ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
		alloc.allocationSize = memreq.size;

		VkMemoryDedicatedAllocateInfoKHR dedicated{ VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
		if (DEDICATED_ALLOCATION) {
			alloc.pNext = &dedicated;
			dedicated.image = chain.images[i].handle;
		}

		MemoryTypeFromProperties(memreq.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &alloc.memoryTypeIndex);
		VkResult res = vkAllocateMemory(device, &alloc, pAllocator, &chain.images[i].memory);
		assert(res == VK_SUCCESS);
		res = vkBindImageMemory(device, chain.images[i].handle, chain.images[i].memory, 0);
		assert(res == VK_SUCCESS);

		chain.images[i].retro_image.create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
		chain.images[i].retro_image.create_info.image = chain.images[i].handle;
		chain.images[i].retro_image.create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
		chain.images[i].retro_image.create_info.format = pCreateInfo->imageFormat;
		chain.images[i].retro_image.create_info.components = { VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY };
		chain.images[i].retro_image.create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
		chain.images[i].retro_image.create_info.subresourceRange.layerCount = 1;
		chain.images[i].retro_image.create_info.subresourceRange.levelCount = 1;
		res = vkCreateImageView(device, &chain.images[i].retro_image.create_info, pAllocator, &chain.images[i].retro_image.image_view);
		assert(res == VK_SUCCESS);

		chain.images[i].retro_image.image_layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
	}

	chain.current_index = -1;
	*pSwapchain = (VkSwapchainKHR)&chain;

	return VK_SUCCESS;
}

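// Reports the fake swapchain images allocated above.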
static VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR_libretro(VkDevice device, VkSwapchainKHR swapchain_, uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
	VkSwapchainKHR_T *swapchain = (VkSwapchainKHR_T *)swapchain_;
	if (pSwapchainImages) {
		assert(*pSwapchainImageCount <= swapchain->count);
		for (uint32_t i = 0; i < *pSwapchainImageCount; i++)
			pSwapchainImages[i] = swapchain->images[i].handle;
	} else
		*pSwapchainImageCount = swapchain->count;

	return VK_SUCCESS;
}

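// Image acquisition is driven by the frontend's sync index rather than by a real swapchain.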
static VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR_libretro(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
	vulkan->wait_sync_index(vulkan->handle);
	*pImageIndex = vulkan->get_sync_index(vulkan->handle);
#if 0
	vulkan->set_signal_semaphore(vulkan->handle, semaphore);
#endif
	return VK_SUCCESS;
}

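// "Presents" by publishing the rendered image to the frontend via set_image() and waking
// anyone blocked in vk_libretro_wait_for_presentation().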
static VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR_libretro(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
	VkSwapchainKHR_T *swapchain = (VkSwapchainKHR_T *)pPresentInfo->pSwapchains[0];
	std::unique_lock<std::mutex> lock(swapchain->mutex);
#if 0
	if (chain.current_index >= 0)
		chain.condVar.wait(lock);
#endif

	chain.current_index = pPresentInfo->pImageIndices[0];
#if 0
	vulkan->set_image(vulkan->handle, &swapchain->images[pPresentInfo->pImageIndices[0]].retro_image, pPresentInfo->waitSemaphoreCount, pPresentInfo->pWaitSemaphores, vulkan->queue_index);
#else
	vulkan->set_image(vulkan->handle, &swapchain->images[pPresentInfo->pImageIndices[0]].retro_image, 0, nullptr, vulkan->queue_index);
#endif
	swapchain->condVar.notify_all();

	return VK_SUCCESS;
}

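// Blocks until vkQueuePresentKHR_libretro has published an image for the current frame.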
void vk_libretro_wait_for_presentation() {
	std::unique_lock<std::mutex> lock(chain.mutex);
	if (chain.current_index < 0)
		chain.condVar.wait(lock);
#if 0
	chain.current_index = -1;
	chain.condVar.notify_all();
#endif
}

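// Instance, device and surface are owned by the frontend, so their destroy calls are no-ops.
// The swapchain images, views and memory, however, are ours to clean up.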
static VKAPI_ATTR void VKAPI_CALL vkDestroyInstance_libretro(VkInstance instance, const VkAllocationCallbacks *pAllocator) {}
static VKAPI_ATTR void VKAPI_CALL vkDestroyDevice_libretro(VkDevice device, const VkAllocationCallbacks *pAllocator) {}
static VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR_libretro(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {}
static VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR_libretro(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
	for (uint32_t i = 0; i < chain.count; i++) {
		vkDestroyImage(device, chain.images[i].handle, pAllocator);
		vkDestroyImageView(device, chain.images[i].retro_image.image_view, pAllocator);
		vkFreeMemory(device, chain.images[i].memory, pAllocator);
	}

	memset(&chain.images, 0x00, sizeof(chain.images));
	chain.count = 0;
	chain.current_index = -1;
}

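// Semaphores are managed by the frontend, so they are stripped from each submit; queue access
// is serialized through the frontend's queue lock.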
VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit_libretro(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
	VkResult res = VK_SUCCESS;

#if 0
	for (uint32_t i = 0; i < submitCount; i++)
		vulkan->set_command_buffers(vulkan->handle, pSubmits[i].commandBufferCount, pSubmits[i].pCommandBuffers);
#else
#if 1
	for (uint32_t i = 0; i < submitCount; i++) {
		((VkSubmitInfo *)pSubmits)[i].waitSemaphoreCount = 0;
		((VkSubmitInfo *)pSubmits)[i].pWaitSemaphores = nullptr;
		((VkSubmitInfo *)pSubmits)[i].signalSemaphoreCount = 0;
		((VkSubmitInfo *)pSubmits)[i].pSignalSemaphores = nullptr;
	}
#endif
	vulkan->lock_queue(vulkan->handle);
	res = vkQueueSubmit_org(queue, submitCount, pSubmits, fence);
	vulkan->unlock_queue(vulkan->handle);
#endif

	return res;
}

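// The frontend may also be submitting to this queue, so idle waits take the queue lock too.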
VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle_libretro(VkQueue queue) {
	vulkan->lock_queue(vulkan->handle);
	VkResult res = vkQueueWaitIdle_org(queue);
	vulkan->unlock_queue(vulkan->handle);
	return res;
}

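// Our fake swapchain images are sampled by the frontend, never presented, so barriers that
// transition to or from PRESENT_SRC are rewritten to SHADER_READ_ONLY_OPTIMAL.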
VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier_libretro(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
	VkImageMemoryBarrier *barriers = (VkImageMemoryBarrier *)pImageMemoryBarriers;
	for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
		if (pImageMemoryBarriers[i].oldLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
			barriers[i].oldLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
			barriers[i].srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
		}
		if (pImageMemoryBarriers[i].newLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
			barriers[i].newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
			barriers[i].dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
		}
	}
	return vkCmdPipelineBarrier_org(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, barriers);
}

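// Same story for render passes: a PRESENT_SRC final layout becomes SHADER_READ_ONLY_OPTIMAL.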
VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass_libretro(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
	if (pCreateInfo->pAttachments[0].finalLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)
		((VkAttachmentDescription *)pCreateInfo->pAttachments)[0].finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

	return vkCreateRenderPass_org(device, pCreateInfo, pAllocator, pRenderPass);
}

#undef LIBRETRO_VK_WARP_FUNC
// Second expansion: if pName matches a warped function, remember the real pointer and return ours.
#define LIBRETRO_VK_WARP_FUNC(x) \
	if (!strcmp(pName, #x)) { \
		x##_org = (PFN_##x)fptr; \
		return (PFN_vkVoidFunction)x##_libretro; \
	}

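// Resolves instance-level entry points, substituting our warp functions where we have one and
// redirecting all platform surface creation to vkCreateLibretroSurfaceKHR.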
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr_libretro(VkInstance instance, const char *pName) {
	if (false
#ifdef _WIN32
	    || !strcmp(pName, "vkCreateWin32SurfaceKHR")
#endif
#ifdef __ANDROID__
	    || !strcmp(pName, "vkCreateAndroidSurfaceKHR")
#endif
#ifdef VK_USE_PLATFORM_METAL_EXT
	    || !strcmp(pName, "vkCreateMetalSurfaceEXT")
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
	    || !strcmp(pName, "vkCreateXlibSurfaceKHR")
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
	    || !strcmp(pName, "vkCreateXcbSurfaceKHR")
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
	    || !strcmp(pName, "vkCreateWaylandSurfaceKHR")
#endif
#ifdef VK_USE_PLATFORM_DISPLAY_KHR
	    || !strcmp(pName, "vkCreateDisplayPlaneSurfaceKHR")
#endif
	) {
		return (PFN_vkVoidFunction)vkCreateLibretroSurfaceKHR;
	}

	PFN_vkVoidFunction fptr = vk_init_info.get_instance_proc_addr(instance, pName);
	if (!fptr) {
		ERROR_LOG(Log::G3D, "Failed to load VK instance function: %s", pName);
		return fptr;
	}

	LIBRETRO_VK_WARP_LIST();

	return fptr;
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr_libretro(VkDevice device, const char *pName) {
	PFN_vkVoidFunction fptr = vkGetDeviceProcAddr_org(device, pName);
	if (!fptr)
		return fptr;

	LIBRETRO_VK_WARP_LIST();

	return fptr;
}

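// Entry point called during libretro context setup: records the frontend-provided objects and
// hooks the loader's function pointers so all subsequent Vulkan calls go through the warps.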
void vk_libretro_init(VkInstance instance, VkPhysicalDevice gpu, VkSurfaceKHR surface, PFN_vkGetInstanceProcAddr get_instance_proc_addr, const char **required_device_extensions, unsigned num_required_device_extensions, const char **required_device_layers, unsigned num_required_device_layers, const VkPhysicalDeviceFeatures *required_features) {
	assert(surface);

	vk_init_info.instance = instance;
	vk_init_info.gpu = gpu;
	vk_init_info.surface = surface;
	vk_init_info.get_instance_proc_addr = get_instance_proc_addr;
	vk_init_info.required_device_extensions = required_device_extensions;
	vk_init_info.num_required_device_extensions = num_required_device_extensions;
	vk_init_info.required_device_layers = required_device_layers;
	vk_init_info.num_required_device_layers = num_required_device_layers;
	vk_init_info.required_features = required_features;

	vkGetInstanceProcAddr_org = vkGetInstanceProcAddr;
	vkGetInstanceProcAddr = vkGetInstanceProcAddr_libretro;
	vkGetDeviceProcAddr_org = (PFN_vkGetDeviceProcAddr)vkGetInstanceProcAddr(instance, "vkGetDeviceProcAddr");
	vkGetDeviceProcAddr = vkGetDeviceProcAddr_libretro;
	vkCreateInstance = vkCreateInstance_libretro;

	vkEnumerateInstanceVersion = (PFN_vkEnumerateInstanceVersion)vkGetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion");
	vkEnumerateInstanceExtensionProperties = (PFN_vkEnumerateInstanceExtensionProperties)vkGetInstanceProcAddr(NULL, "vkEnumerateInstanceExtensionProperties");
	vkEnumerateInstanceLayerProperties = (PFN_vkEnumerateInstanceLayerProperties)vkGetInstanceProcAddr(NULL, "vkEnumerateInstanceLayerProperties");
}

void vk_libretro_set_hwrender_interface(retro_hw_render_interface *hw_render_interface) {
	vulkan = (retro_hw_render_interface_vulkan *)hw_render_interface;
}

void vk_libretro_shutdown() {
	memset(&vk_init_info, 0, sizeof(vk_init_info));
	vulkan = nullptr;
	DEDICATED_ALLOCATION = false;
}