GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/vulkan/wsi/wsi_common.c
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "wsi_common_private.h"
#include "util/macros.h"
#include "util/os_file.h"
#include "util/os_time.h"
#include "util/xmlconfig.h"
#include "vk_util.h"

#include <time.h>
#include <stdlib.h>
#include <stdio.h>

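/*
 * Initialize the device-independent WSI state for a physical device.  Entry
 * points are resolved through the driver-provided proc_addr callback, device
 * limits and memory/queue-family properties are cached, and each platform
 * backend compiled in (X11, Wayland, Win32, display) is initialized.  The
 * MESA_VK_WSI_PRESENT_MODE environment variable and the driconf options
 * "adaptive_sync" and "vk_wsi_force_bgra8_unorm_first" are honored here.
 */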
VkResult
wsi_device_init(struct wsi_device *wsi,
                VkPhysicalDevice pdevice,
                WSI_FN_GetPhysicalDeviceProcAddr proc_addr,
                const VkAllocationCallbacks *alloc,
                int display_fd,
                const struct driOptionCache *dri_options,
                bool sw_device)
{
   const char *present_mode;
   UNUSED VkResult result;

   memset(wsi, 0, sizeof(*wsi));

   wsi->instance_alloc = *alloc;
   wsi->pdevice = pdevice;
   wsi->sw = sw_device;
#define WSI_GET_CB(func) \
   PFN_vk##func func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
   WSI_GET_CB(GetPhysicalDeviceProperties2);
   WSI_GET_CB(GetPhysicalDeviceMemoryProperties);
   WSI_GET_CB(GetPhysicalDeviceQueueFamilyProperties);
#undef WSI_GET_CB

   wsi->pci_bus_info.sType =
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT;
   VkPhysicalDeviceProperties2 pdp2 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
      .pNext = &wsi->pci_bus_info,
   };
   GetPhysicalDeviceProperties2(pdevice, &pdp2);

   wsi->maxImageDimension2D = pdp2.properties.limits.maxImageDimension2D;
   wsi->override_present_mode = VK_PRESENT_MODE_MAX_ENUM_KHR;

   GetPhysicalDeviceMemoryProperties(pdevice, &wsi->memory_props);
   GetPhysicalDeviceQueueFamilyProperties(pdevice, &wsi->queue_family_count, NULL);

#define WSI_GET_CB(func) \
   wsi->func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
   WSI_GET_CB(AllocateMemory);
   WSI_GET_CB(AllocateCommandBuffers);
   WSI_GET_CB(BindBufferMemory);
   WSI_GET_CB(BindImageMemory);
   WSI_GET_CB(BeginCommandBuffer);
   WSI_GET_CB(CmdCopyImageToBuffer);
   WSI_GET_CB(CreateBuffer);
   WSI_GET_CB(CreateCommandPool);
   WSI_GET_CB(CreateFence);
   WSI_GET_CB(CreateImage);
   WSI_GET_CB(DestroyBuffer);
   WSI_GET_CB(DestroyCommandPool);
   WSI_GET_CB(DestroyFence);
   WSI_GET_CB(DestroyImage);
   WSI_GET_CB(EndCommandBuffer);
   WSI_GET_CB(FreeMemory);
   WSI_GET_CB(FreeCommandBuffers);
   WSI_GET_CB(GetBufferMemoryRequirements);
   WSI_GET_CB(GetImageDrmFormatModifierPropertiesEXT);
   WSI_GET_CB(GetImageMemoryRequirements);
   WSI_GET_CB(GetImageSubresourceLayout);
   if (!wsi->sw)
      WSI_GET_CB(GetMemoryFdKHR);
   WSI_GET_CB(GetPhysicalDeviceFormatProperties);
   WSI_GET_CB(GetPhysicalDeviceFormatProperties2KHR);
   WSI_GET_CB(GetPhysicalDeviceImageFormatProperties2);
   WSI_GET_CB(ResetFences);
   WSI_GET_CB(QueueSubmit);
   WSI_GET_CB(WaitForFences);
   WSI_GET_CB(MapMemory);
   WSI_GET_CB(UnmapMemory);
#undef WSI_GET_CB

#ifdef VK_USE_PLATFORM_XCB_KHR
   result = wsi_x11_init_wsi(wsi, alloc, dri_options);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   result = wsi_wl_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_WIN32_KHR
   result = wsi_win32_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   result = wsi_display_init_wsi(wsi, alloc, display_fd);
   if (result != VK_SUCCESS)
      goto fail;
#endif

   present_mode = getenv("MESA_VK_WSI_PRESENT_MODE");
   if (present_mode) {
      if (!strcmp(present_mode, "fifo")) {
         wsi->override_present_mode = VK_PRESENT_MODE_FIFO_KHR;
      } else if (!strcmp(present_mode, "relaxed")) {
         wsi->override_present_mode = VK_PRESENT_MODE_FIFO_RELAXED_KHR;
      } else if (!strcmp(present_mode, "mailbox")) {
         wsi->override_present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
      } else if (!strcmp(present_mode, "immediate")) {
         wsi->override_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
      } else {
         fprintf(stderr, "Invalid MESA_VK_WSI_PRESENT_MODE value!\n");
      }
   }

   if (dri_options) {
      if (driCheckOption(dri_options, "adaptive_sync", DRI_BOOL))
         wsi->enable_adaptive_sync = driQueryOptionb(dri_options,
                                                     "adaptive_sync");

      if (driCheckOption(dri_options, "vk_wsi_force_bgra8_unorm_first", DRI_BOOL)) {
         wsi->force_bgra8_unorm_first =
            driQueryOptionb(dri_options, "vk_wsi_force_bgra8_unorm_first");
      }
   }

   return VK_SUCCESS;
#if defined(VK_USE_PLATFORM_XCB_KHR) || \
    defined(VK_USE_PLATFORM_WAYLAND_KHR) || \
    defined(VK_USE_PLATFORM_WIN32_KHR) || \
    defined(VK_USE_PLATFORM_DISPLAY_KHR)
fail:
   wsi_device_finish(wsi, alloc);
   return result;
#endif
}

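/*
 * Tear down the per-platform WSI backends created by wsi_device_init().
 */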
void
wsi_device_finish(struct wsi_device *wsi,
                  const VkAllocationCallbacks *alloc)
{
#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   wsi_display_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   wsi_wl_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
   wsi_win32_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
   wsi_x11_finish_wsi(wsi, alloc);
#endif
}

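/*
 * Common initialization shared by all platform swapchain implementations:
 * zero the base structure and create one command pool per queue family so
 * that prime-blit command buffers can later be recorded for whichever queue
 * family the application presents from.
 */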
VkResult
wsi_swapchain_init(const struct wsi_device *wsi,
                   struct wsi_swapchain *chain,
                   VkDevice device,
                   const VkSwapchainCreateInfoKHR *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator)
{
   VkResult result;

   memset(chain, 0, sizeof(*chain));

   vk_object_base_init(NULL, &chain->base, VK_OBJECT_TYPE_SWAPCHAIN_KHR);

   chain->wsi = wsi;
   chain->device = device;
   chain->alloc = *pAllocator;
   chain->use_prime_blit = false;

   chain->cmd_pools =
      vk_zalloc(pAllocator, sizeof(VkCommandPool) * wsi->queue_family_count, 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!chain->cmd_pools)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   for (uint32_t i = 0; i < wsi->queue_family_count; i++) {
      const VkCommandPoolCreateInfo cmd_pool_info = {
         .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
         .pNext = NULL,
         .flags = 0,
         .queueFamilyIndex = i,
      };
      result = wsi->CreateCommandPool(device, &cmd_pool_info, &chain->alloc,
                                      &chain->cmd_pools[i]);
      if (result != VK_SUCCESS)
         goto fail;
   }

   return VK_SUCCESS;

fail:
   wsi_swapchain_finish(chain);
   return result;
}

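/*
 * Check whether the surface's platform backend reports the given present
 * mode as supported.
 */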
static bool
wsi_swapchain_is_present_mode_supported(struct wsi_device *wsi,
                                        const VkSwapchainCreateInfoKHR *pCreateInfo,
                                        VkPresentModeKHR mode)
{
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
   struct wsi_interface *iface = wsi->wsi[surface->platform];
   VkPresentModeKHR *present_modes;
   uint32_t present_mode_count;
   bool supported = false;
   VkResult result;

   result = iface->get_present_modes(surface, &present_mode_count, NULL);
   if (result != VK_SUCCESS)
      return supported;

   present_modes = malloc(present_mode_count * sizeof(*present_modes));
   if (!present_modes)
      return supported;

   result = iface->get_present_modes(surface, &present_mode_count,
                                     present_modes);
   if (result != VK_SUCCESS)
      goto fail;

   for (uint32_t i = 0; i < present_mode_count; i++) {
      if (present_modes[i] == mode) {
         supported = true;
         break;
      }
   }

fail:
   free(present_modes);
   return supported;
}

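/*
 * Return the present mode to actually use for a new swapchain: the
 * MESA_VK_WSI_PRESENT_MODE override if it is set and supported by the
 * surface, otherwise the mode requested by the application.
 */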
enum VkPresentModeKHR
wsi_swapchain_get_present_mode(struct wsi_device *wsi,
                               const VkSwapchainCreateInfoKHR *pCreateInfo)
{
   if (wsi->override_present_mode == VK_PRESENT_MODE_MAX_ENUM_KHR)
      return pCreateInfo->presentMode;

   if (!wsi_swapchain_is_present_mode_supported(wsi, pCreateInfo,
                                                wsi->override_present_mode)) {
      fprintf(stderr, "Unsupported MESA_VK_WSI_PRESENT_MODE value!\n");
      return pCreateInfo->presentMode;
   }

   return wsi->override_present_mode;
}

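/*
 * Release the per-swapchain resources owned by the common layer: the
 * per-image fences and the per-queue-family command pools.
 */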
void
wsi_swapchain_finish(struct wsi_swapchain *chain)
{
   if (chain->fences) {
      for (unsigned i = 0; i < chain->image_count; i++)
         chain->wsi->DestroyFence(chain->device, chain->fences[i], &chain->alloc);

      vk_free(&chain->alloc, chain->fences);
   }

   for (uint32_t i = 0; i < chain->wsi->queue_family_count; i++) {
      chain->wsi->DestroyCommandPool(chain->device, chain->cmd_pools[i],
                                     &chain->alloc);
   }
   vk_free(&chain->alloc, chain->cmd_pools);

   vk_object_base_finish(&chain->base);
}

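/*
 * Free everything associated with a single swapchain image: the prime-blit
 * command buffers (if any), the image and its memory, and the prime staging
 * buffer and its memory.
 */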
void
wsi_destroy_image(const struct wsi_swapchain *chain,
                  struct wsi_image *image)
{
   const struct wsi_device *wsi = chain->wsi;

   if (image->prime.blit_cmd_buffers) {
      for (uint32_t i = 0; i < wsi->queue_family_count; i++) {
         wsi->FreeCommandBuffers(chain->device, chain->cmd_pools[i],
                                 1, &image->prime.blit_cmd_buffers[i]);
      }
      vk_free(&chain->alloc, image->prime.blit_cmd_buffers);
   }

   wsi->FreeMemory(chain->device, image->memory, &chain->alloc);
   wsi->DestroyImage(chain->device, image->image, &chain->alloc);
   wsi->FreeMemory(chain->device, image->prime.memory, &chain->alloc);
   wsi->DestroyBuffer(chain->device, image->prime.buffer, &chain->alloc);
}

VkResult
wsi_common_get_surface_support(struct wsi_device *wsi_device,
                               uint32_t queueFamilyIndex,
                               VkSurfaceKHR _surface,
                               VkBool32* pSupported)
{
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   return iface->get_support(surface, wsi_device,
                             queueFamilyIndex, pSupported);
}

VkResult
wsi_common_get_surface_capabilities(struct wsi_device *wsi_device,
                                    VkSurfaceKHR _surface,
                                    VkSurfaceCapabilitiesKHR *pSurfaceCapabilities)
{
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   VkSurfaceCapabilities2KHR caps2 = {
      .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
   };

   VkResult result = iface->get_capabilities2(surface, wsi_device, NULL, &caps2);

   if (result == VK_SUCCESS)
      *pSurfaceCapabilities = caps2.surfaceCapabilities;

   return result;
}

VkResult
wsi_common_get_surface_capabilities2(struct wsi_device *wsi_device,
                                     const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
                                     VkSurfaceCapabilities2KHR *pSurfaceCapabilities)
{
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   return iface->get_capabilities2(surface, wsi_device, pSurfaceInfo->pNext,
                                   pSurfaceCapabilities);
}

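/*
 * Common helper behind vkGetPhysicalDeviceSurfaceCapabilities2EXT: query the
 * backend's get_capabilities2 entry point with the Mesa-internal
 * supported-counters struct chained in, then translate the KHR capabilities
 * into the EXT layout.
 */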
VkResult
wsi_common_get_surface_capabilities2ext(
   struct wsi_device *wsi_device,
   VkSurfaceKHR _surface,
   VkSurfaceCapabilities2EXT *pSurfaceCapabilities)
{
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   assert(pSurfaceCapabilities->sType ==
          VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT);

   struct wsi_surface_supported_counters counters = {
      .sType = VK_STRUCTURE_TYPE_WSI_SURFACE_SUPPORTED_COUNTERS_MESA,
      .pNext = pSurfaceCapabilities->pNext,
      .supported_surface_counters = 0,
   };

   VkSurfaceCapabilities2KHR caps2 = {
      .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
      .pNext = &counters,
   };

   VkResult result = iface->get_capabilities2(surface, wsi_device, NULL, &caps2);

   if (result == VK_SUCCESS) {
      VkSurfaceCapabilities2EXT *ext_caps = pSurfaceCapabilities;
      VkSurfaceCapabilitiesKHR khr_caps = caps2.surfaceCapabilities;

      ext_caps->minImageCount = khr_caps.minImageCount;
      ext_caps->maxImageCount = khr_caps.maxImageCount;
      ext_caps->currentExtent = khr_caps.currentExtent;
      ext_caps->minImageExtent = khr_caps.minImageExtent;
      ext_caps->maxImageExtent = khr_caps.maxImageExtent;
      ext_caps->maxImageArrayLayers = khr_caps.maxImageArrayLayers;
      ext_caps->supportedTransforms = khr_caps.supportedTransforms;
      ext_caps->currentTransform = khr_caps.currentTransform;
      ext_caps->supportedCompositeAlpha = khr_caps.supportedCompositeAlpha;
      ext_caps->supportedUsageFlags = khr_caps.supportedUsageFlags;
      ext_caps->supportedSurfaceCounters = counters.supported_surface_counters;
   }

   return result;
}

VkResult
wsi_common_get_surface_formats(struct wsi_device *wsi_device,
                               VkSurfaceKHR _surface,
                               uint32_t *pSurfaceFormatCount,
                               VkSurfaceFormatKHR *pSurfaceFormats)
{
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   return iface->get_formats(surface, wsi_device,
                             pSurfaceFormatCount, pSurfaceFormats);
}

VkResult
wsi_common_get_surface_formats2(struct wsi_device *wsi_device,
                                const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
                                uint32_t *pSurfaceFormatCount,
                                VkSurfaceFormat2KHR *pSurfaceFormats)
{
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   return iface->get_formats2(surface, wsi_device, pSurfaceInfo->pNext,
                              pSurfaceFormatCount, pSurfaceFormats);
}

VkResult
wsi_common_get_surface_present_modes(struct wsi_device *wsi_device,
                                     VkSurfaceKHR _surface,
                                     uint32_t *pPresentModeCount,
                                     VkPresentModeKHR *pPresentModes)
{
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   return iface->get_present_modes(surface, pPresentModeCount,
                                   pPresentModes);
}

VkResult
wsi_common_get_present_rectangles(struct wsi_device *wsi_device,
                                  VkSurfaceKHR _surface,
                                  uint32_t* pRectCount,
                                  VkRect2D* pRects)
{
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   return iface->get_present_rectangles(surface, wsi_device,
                                        pRectCount, pRects);
}

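/*
 * Create a swapchain through the surface's platform backend and allocate the
 * per-image fence array used by wsi_common_queue_present() to pace reuse of
 * each image.
 */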
VkResult
wsi_common_create_swapchain(struct wsi_device *wsi,
                            VkDevice device,
                            const VkSwapchainCreateInfoKHR *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator,
                            VkSwapchainKHR *pSwapchain)
{
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
   struct wsi_interface *iface = wsi->wsi[surface->platform];
   struct wsi_swapchain *swapchain;

   VkResult result = iface->create_swapchain(surface, device, wsi,
                                             pCreateInfo, pAllocator,
                                             &swapchain);
   if (result != VK_SUCCESS)
      return result;

   swapchain->fences = vk_zalloc(pAllocator,
                                 sizeof (*swapchain->fences) * swapchain->image_count,
                                 sizeof (*swapchain->fences),
                                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!swapchain->fences) {
      swapchain->destroy(swapchain, pAllocator);
      return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   *pSwapchain = wsi_swapchain_to_handle(swapchain);

   return VK_SUCCESS;
}

void
wsi_common_destroy_swapchain(VkDevice device,
                             VkSwapchainKHR _swapchain,
                             const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
   if (!swapchain)
      return;

   swapchain->destroy(swapchain, pAllocator);
}

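/*
 * Common helper behind vkGetSwapchainImagesKHR, using the standard Vulkan
 * two-call pattern via the vk_outarray helpers.
 */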
VkResult
wsi_common_get_images(VkSwapchainKHR _swapchain,
                      uint32_t *pSwapchainImageCount,
                      VkImage *pSwapchainImages)
{
   VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
   VK_OUTARRAY_MAKE_TYPED(VkImage, images, pSwapchainImages, pSwapchainImageCount);

   for (uint32_t i = 0; i < swapchain->image_count; i++) {
      vk_outarray_append_typed(VkImage, &images, image) {
         *image = swapchain->get_wsi_image(swapchain, i)->image;
      }
   }

   return vk_outarray_status(&images);
}

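/*
 * Acquire the next image from the platform backend, then, for drivers that
 * implement the corresponding hooks, hand ownership of the image memory back
 * to the driver and signal the application-provided semaphore and/or fence
 * from that memory.
 */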
VkResult
wsi_common_acquire_next_image2(const struct wsi_device *wsi,
                               VkDevice device,
                               const VkAcquireNextImageInfoKHR *pAcquireInfo,
                               uint32_t *pImageIndex)
{
   VK_FROM_HANDLE(wsi_swapchain, swapchain, pAcquireInfo->swapchain);

   VkResult result = swapchain->acquire_next_image(swapchain, pAcquireInfo,
                                                   pImageIndex);
   if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
      return result;

   if (wsi->set_memory_ownership) {
      VkDeviceMemory mem = swapchain->get_wsi_image(swapchain, *pImageIndex)->memory;
      wsi->set_memory_ownership(swapchain->device, mem, true);
   }

   if (pAcquireInfo->semaphore != VK_NULL_HANDLE &&
       wsi->signal_semaphore_for_memory != NULL) {
      struct wsi_image *image =
         swapchain->get_wsi_image(swapchain, *pImageIndex);
      wsi->signal_semaphore_for_memory(device, pAcquireInfo->semaphore,
                                       image->memory);
   }

   if (pAcquireInfo->fence != VK_NULL_HANDLE &&
       wsi->signal_fence_for_memory != NULL) {
      struct wsi_image *image =
         swapchain->get_wsi_image(swapchain, *pImageIndex);
      wsi->signal_fence_for_memory(device, pAcquireInfo->fence,
                                   image->memory);
   }

   return result;
}

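/*
 * Common implementation of vkQueuePresentKHR.  For each swapchain, submit a
 * (nearly empty) VkSubmitInfo that waits on the application's present
 * semaphores (first swapchain only), performs the prime blit when needed,
 * and signals the per-image fence used to pace reuse of that image slot.
 * The platform backend's queue_present hook then performs the actual
 * presentation.
 */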
VkResult
wsi_common_queue_present(const struct wsi_device *wsi,
                         VkDevice device,
                         VkQueue queue,
                         int queue_family_index,
                         const VkPresentInfoKHR *pPresentInfo)
{
   VkResult final_result = VK_SUCCESS;

   const VkPresentRegionsKHR *regions =
      vk_find_struct_const(pPresentInfo->pNext, PRESENT_REGIONS_KHR);

   for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
      VK_FROM_HANDLE(wsi_swapchain, swapchain, pPresentInfo->pSwapchains[i]);
      uint32_t image_index = pPresentInfo->pImageIndices[i];
      VkResult result;

      if (swapchain->fences[image_index] == VK_NULL_HANDLE) {
         const VkFenceCreateInfo fence_info = {
            .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
         };
         result = wsi->CreateFence(device, &fence_info,
                                   &swapchain->alloc,
                                   &swapchain->fences[image_index]);
         if (result != VK_SUCCESS)
            goto fail_present;
      } else {
         result =
            wsi->WaitForFences(device, 1, &swapchain->fences[image_index],
                               true, ~0ull);
         if (result != VK_SUCCESS)
            goto fail_present;

         result =
            wsi->ResetFences(device, 1, &swapchain->fences[image_index]);
         if (result != VK_SUCCESS)
            goto fail_present;
      }

      struct wsi_image *image =
         swapchain->get_wsi_image(swapchain, image_index);

      struct wsi_memory_signal_submit_info mem_signal = {
         .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA,
         .pNext = NULL,
         .memory = image->memory,
      };

      VkSubmitInfo submit_info = {
         .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
         .pNext = &mem_signal,
      };

      VkPipelineStageFlags *stage_flags = NULL;
      if (i == 0) {
         /* We only need/want to wait on semaphores once. After that, we're
          * guaranteed ordering since it all happens on the same queue.
          */
         submit_info.waitSemaphoreCount = pPresentInfo->waitSemaphoreCount;
         submit_info.pWaitSemaphores = pPresentInfo->pWaitSemaphores;

         /* Set up the pWaitDstStageMasks */
         stage_flags = vk_alloc(&swapchain->alloc,
                                sizeof(VkPipelineStageFlags) *
                                pPresentInfo->waitSemaphoreCount,
                                8,
                                VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (!stage_flags) {
            result = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto fail_present;
         }
         for (uint32_t s = 0; s < pPresentInfo->waitSemaphoreCount; s++)
            stage_flags[s] = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;

         submit_info.pWaitDstStageMask = stage_flags;
      }

      if (swapchain->use_prime_blit) {
         /* If we are using prime blits, we need to perform the blit now. The
          * command buffer is attached to the image.
          */
         submit_info.commandBufferCount = 1;
         submit_info.pCommandBuffers =
            &image->prime.blit_cmd_buffers[queue_family_index];
         mem_signal.memory = image->prime.memory;
      }

      result = wsi->QueueSubmit(queue, 1, &submit_info, swapchain->fences[image_index]);
      vk_free(&swapchain->alloc, stage_flags);
      if (result != VK_SUCCESS)
         goto fail_present;

      const VkPresentRegionKHR *region = NULL;
      if (regions && regions->pRegions)
         region = &regions->pRegions[i];

      result = swapchain->queue_present(swapchain, image_index, region);
      if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
         goto fail_present;

      if (wsi->set_memory_ownership) {
         VkDeviceMemory mem = swapchain->get_wsi_image(swapchain, image_index)->memory;
         wsi->set_memory_ownership(swapchain->device, mem, false);
      }

   fail_present:
      if (pPresentInfo->pResults != NULL)
         pPresentInfo->pResults[i] = result;

      /* Let the final result be our first unsuccessful result */
      if (final_result == VK_SUCCESS)
         final_result = result;
   }

   return final_result;
}

uint64_t
wsi_common_get_current_time(void)
{
   return os_time_get_nano();
}