Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/virtio/vulkan/vn_wsi.c
4560 views
1
/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_wsi.h"

#include "vn_device.h"
#include "vn_image.h"
#include "vn_queue.h"

/* The common WSI support makes some assumptions about the driver.
 *
 * In wsi_device_init, it assumes VK_EXT_pci_bus_info is available.  In
 * wsi_create_native_image and wsi_create_prime_image, it assumes
 * VK_KHR_external_memory_fd and VK_EXT_external_memory_dma_buf are enabled.
 *
 * In wsi_create_native_image, if wsi_device::supports_modifiers is set and
 * the window system supports modifiers, it assumes
 * VK_EXT_image_drm_format_modifier is enabled.  Otherwise, it assumes that
 * wsi_image_create_info can be chained to VkImageCreateInfo and
 * vkGetImageSubresourceLayout can be called even when the tiling is
 * VK_IMAGE_TILING_OPTIMAL.
 *
 * Together, it knows how to share dma-bufs, with explicit or implicit
 * modifiers, to the window system.
 *
 * For venus, we use explicit modifiers when the renderer and the window
 * system support them.  Otherwise, we have to fall back to
 * VK_IMAGE_TILING_LINEAR (or trigger the prime blit path).  But the fallback
 * can be problematic when the memory is scanned out directly and special
 * requirements (e.g., alignments) must be met.
 *
 * The common WSI support makes other assumptions about the driver to support
 * implicit fencing.  In wsi_create_native_image and wsi_create_prime_image,
 * it assumes wsi_memory_allocate_info can be chained to VkMemoryAllocateInfo.
 * In wsi_common_queue_present, it assumes wsi_memory_signal_submit_info can
 * be chained to VkSubmitInfo.  Finally, in wsi_common_acquire_next_image2, it
 * calls wsi_device::signal_semaphore_for_memory, and
 * wsi_device::signal_fence_for_memory if the driver provides them.
 *
 * Some drivers use wsi_memory_allocate_info to set up implicit fencing.
 * Others use wsi_memory_signal_submit_info to set up implicit IN-fences and
 * use wsi_device::signal_*_for_memory to set up implicit OUT-fences.
 *
 * For venus, implicit fencing is broken (and there is no explicit fencing
 * support yet).  The kernel driver assumes everything is in the same fence
 * context and no synchronization is needed.  It should be fixed for
 * correctness, but it is still not ideal.  venus requires explicit fencing
 * (and renderer-side synchronization) to work well.
 */

static PFN_vkVoidFunction
59
vn_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
60
{
61
struct vn_physical_device *physical_dev =
62
vn_physical_device_from_handle(physicalDevice);
63
return vk_instance_get_proc_addr_unchecked(
64
&physical_dev->instance->base.base, pName);
65
}
66
67
VkResult
68
vn_wsi_init(struct vn_physical_device *physical_dev)
69
{
70
const VkAllocationCallbacks *alloc =
71
&physical_dev->instance->base.base.alloc;
72
VkResult result = wsi_device_init(
73
&physical_dev->wsi_device, vn_physical_device_to_handle(physical_dev),
74
vn_wsi_proc_addr, alloc, -1, &physical_dev->instance->dri_options,
75
false);
76
if (result != VK_SUCCESS)
77
return result;
78
79
if (physical_dev->base.base.supported_extensions
80
.EXT_image_drm_format_modifier)
81
physical_dev->wsi_device.supports_modifiers = true;
82
83
return VK_SUCCESS;
84
}
85
86
void
87
vn_wsi_fini(struct vn_physical_device *physical_dev)
88
{
89
const VkAllocationCallbacks *alloc =
90
&physical_dev->instance->base.base.alloc;
91
wsi_device_finish(&physical_dev->wsi_device, alloc);
92
}
93
94
VkResult
95
vn_wsi_create_image(struct vn_device *dev,
96
const VkImageCreateInfo *create_info,
97
const struct wsi_image_create_info *wsi_info,
98
const VkAllocationCallbacks *alloc,
99
struct vn_image **out_img)
100
{
101
/* TODO This is the legacy path used by wsi_create_native_image when there
102
* is no modifier support. Instead of forcing VK_IMAGE_TILING_LINEAR, we
103
* should ask wsi to use wsi_create_prime_image instead.
104
*
105
* In fact, this is not enough when the image is truely used for scanout by
106
* the host compositor. There can be requirements we fail to meet. We
107
* should require modifier support at some point.
108
*/
109
VkImageCreateInfo local_create_info;
110
if (wsi_info->scanout) {
111
local_create_info = *create_info;
112
local_create_info.tiling = VK_IMAGE_TILING_LINEAR;
113
create_info = &local_create_info;
114
115
if (VN_DEBUG(WSI))
116
vn_log(dev->instance, "forcing scanout image linear");
117
}
118
119
struct vn_image *img;
120
VkResult result = vn_image_create(dev, create_info, alloc, &img);
121
if (result != VK_SUCCESS)
122
return result;
123
124
img->is_wsi = true;
125
img->prime_blit_buffer = wsi_info->prime_blit_buffer;
126
127
*out_img = img;
128
return VK_SUCCESS;
129
}

/* surface commands */

void
134
vn_DestroySurfaceKHR(VkInstance _instance,
135
VkSurfaceKHR surface,
136
const VkAllocationCallbacks *pAllocator)
137
{
138
struct vn_instance *instance = vn_instance_from_handle(_instance);
139
ICD_FROM_HANDLE(VkIcdSurfaceBase, surf, surface);
140
const VkAllocationCallbacks *alloc =
141
pAllocator ? pAllocator : &instance->base.base.alloc;
142
143
vk_free(alloc, surf);
144
}
145
146
VkResult
147
vn_GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,
148
uint32_t queueFamilyIndex,
149
VkSurfaceKHR surface,
150
VkBool32 *pSupported)
151
{
152
struct vn_physical_device *physical_dev =
153
vn_physical_device_from_handle(physicalDevice);
154
155
VkResult result = wsi_common_get_surface_support(
156
&physical_dev->wsi_device, queueFamilyIndex, surface, pSupported);
157
158
return vn_result(physical_dev->instance, result);
159
}
160
161
VkResult
162
vn_GetPhysicalDeviceSurfaceCapabilitiesKHR(
163
VkPhysicalDevice physicalDevice,
164
VkSurfaceKHR surface,
165
VkSurfaceCapabilitiesKHR *pSurfaceCapabilities)
166
{
167
struct vn_physical_device *physical_dev =
168
vn_physical_device_from_handle(physicalDevice);
169
170
VkResult result = wsi_common_get_surface_capabilities(
171
&physical_dev->wsi_device, surface, pSurfaceCapabilities);
172
173
return vn_result(physical_dev->instance, result);
174
}
175
176
VkResult
177
vn_GetPhysicalDeviceSurfaceCapabilities2KHR(
178
VkPhysicalDevice physicalDevice,
179
const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
180
VkSurfaceCapabilities2KHR *pSurfaceCapabilities)
181
{
182
struct vn_physical_device *physical_dev =
183
vn_physical_device_from_handle(physicalDevice);
184
185
VkResult result = wsi_common_get_surface_capabilities2(
186
&physical_dev->wsi_device, pSurfaceInfo, pSurfaceCapabilities);
187
188
return vn_result(physical_dev->instance, result);
189
}
190
191
VkResult
192
vn_GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice,
193
VkSurfaceKHR surface,
194
uint32_t *pSurfaceFormatCount,
195
VkSurfaceFormatKHR *pSurfaceFormats)
196
{
197
struct vn_physical_device *physical_dev =
198
vn_physical_device_from_handle(physicalDevice);
199
200
VkResult result =
201
wsi_common_get_surface_formats(&physical_dev->wsi_device, surface,
202
pSurfaceFormatCount, pSurfaceFormats);
203
204
return vn_result(physical_dev->instance, result);
205
}
206
207
VkResult
208
vn_GetPhysicalDeviceSurfaceFormats2KHR(
209
VkPhysicalDevice physicalDevice,
210
const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
211
uint32_t *pSurfaceFormatCount,
212
VkSurfaceFormat2KHR *pSurfaceFormats)
213
{
214
struct vn_physical_device *physical_dev =
215
vn_physical_device_from_handle(physicalDevice);
216
217
VkResult result =
218
wsi_common_get_surface_formats2(&physical_dev->wsi_device, pSurfaceInfo,
219
pSurfaceFormatCount, pSurfaceFormats);
220
221
return vn_result(physical_dev->instance, result);
222
}
223
224
VkResult
225
vn_GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice,
226
VkSurfaceKHR surface,
227
uint32_t *pPresentModeCount,
228
VkPresentModeKHR *pPresentModes)
229
{
230
struct vn_physical_device *physical_dev =
231
vn_physical_device_from_handle(physicalDevice);
232
233
VkResult result = wsi_common_get_surface_present_modes(
234
&physical_dev->wsi_device, surface, pPresentModeCount, pPresentModes);
235
236
return vn_result(physical_dev->instance, result);
237
}
238
239
VkResult
240
vn_GetDeviceGroupPresentCapabilitiesKHR(
241
VkDevice device, VkDeviceGroupPresentCapabilitiesKHR *pCapabilities)
242
{
243
memset(pCapabilities->presentMask, 0, sizeof(pCapabilities->presentMask));
244
pCapabilities->presentMask[0] = 0x1;
245
pCapabilities->modes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
246
247
return VK_SUCCESS;
248
}
249
250
VkResult
251
vn_GetDeviceGroupSurfacePresentModesKHR(
252
VkDevice device,
253
VkSurfaceKHR surface,
254
VkDeviceGroupPresentModeFlagsKHR *pModes)
255
{
256
*pModes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
257
258
return VK_SUCCESS;
259
}
260
261
VkResult
262
vn_GetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice,
263
VkSurfaceKHR surface,
264
uint32_t *pRectCount,
265
VkRect2D *pRects)
266
{
267
struct vn_physical_device *physical_dev =
268
vn_physical_device_from_handle(physicalDevice);
269
270
VkResult result = wsi_common_get_present_rectangles(
271
&physical_dev->wsi_device, surface, pRectCount, pRects);
272
273
return vn_result(physical_dev->instance, result);
274
}

/* swapchain commands */

VkResult
279
vn_CreateSwapchainKHR(VkDevice device,
280
const VkSwapchainCreateInfoKHR *pCreateInfo,
281
const VkAllocationCallbacks *pAllocator,
282
VkSwapchainKHR *pSwapchain)
283
{
284
struct vn_device *dev = vn_device_from_handle(device);
285
const VkAllocationCallbacks *alloc =
286
pAllocator ? pAllocator : &dev->base.base.alloc;
287
288
VkResult result =
289
wsi_common_create_swapchain(&dev->physical_device->wsi_device, device,
290
pCreateInfo, alloc, pSwapchain);
291
292
return vn_result(dev->instance, result);
293
}
294
295
void
296
vn_DestroySwapchainKHR(VkDevice device,
297
VkSwapchainKHR swapchain,
298
const VkAllocationCallbacks *pAllocator)
299
{
300
struct vn_device *dev = vn_device_from_handle(device);
301
const VkAllocationCallbacks *alloc =
302
pAllocator ? pAllocator : &dev->base.base.alloc;
303
304
wsi_common_destroy_swapchain(device, swapchain, alloc);
305
}
306
307
VkResult
308
vn_GetSwapchainImagesKHR(VkDevice device,
309
VkSwapchainKHR swapchain,
310
uint32_t *pSwapchainImageCount,
311
VkImage *pSwapchainImages)
312
{
313
struct vn_device *dev = vn_device_from_handle(device);
314
315
VkResult result = wsi_common_get_images(swapchain, pSwapchainImageCount,
316
pSwapchainImages);
317
318
return vn_result(dev->instance, result);
319
}
320
321
VkResult
322
vn_AcquireNextImageKHR(VkDevice device,
323
VkSwapchainKHR swapchain,
324
uint64_t timeout,
325
VkSemaphore semaphore,
326
VkFence fence,
327
uint32_t *pImageIndex)
328
{
329
const VkAcquireNextImageInfoKHR acquire_info = {
330
.sType = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR,
331
.swapchain = swapchain,
332
.timeout = timeout,
333
.semaphore = semaphore,
334
.fence = fence,
335
.deviceMask = 0x1,
336
};
337
338
return vn_AcquireNextImage2KHR(device, &acquire_info, pImageIndex);
339
}
340
341
VkResult
342
vn_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
343
{
344
struct vn_queue *queue = vn_queue_from_handle(_queue);
345
346
VkResult result =
347
wsi_common_queue_present(&queue->device->physical_device->wsi_device,
348
vn_device_to_handle(queue->device), _queue,
349
queue->family, pPresentInfo);
350
351
return vn_result(queue->device->instance, result);
352
}
353
354
VkResult
355
vn_AcquireNextImage2KHR(VkDevice device,
356
const VkAcquireNextImageInfoKHR *pAcquireInfo,
357
uint32_t *pImageIndex)
358
{
359
struct vn_device *dev = vn_device_from_handle(device);
360
361
VkResult result = wsi_common_acquire_next_image2(
362
&dev->physical_device->wsi_device, device, pAcquireInfo, pImageIndex);
363
364
/* XXX this relies on implicit sync */
365
if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
366
struct vn_semaphore *sem =
367
vn_semaphore_from_handle(pAcquireInfo->semaphore);
368
if (sem)
369
vn_semaphore_signal_wsi(dev, sem);
370
371
struct vn_fence *fence = vn_fence_from_handle(pAcquireInfo->fence);
372
if (fence)
373
vn_fence_signal_wsi(dev, fence);
374
}
375
376
return vn_result(dev->instance, result);
377
}
378
379