GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/virtio/vulkan/vn_pipeline.c
/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_pipeline.h"

#include "venus-protocol/vn_protocol_driver_pipeline.h"
#include "venus-protocol/vn_protocol_driver_pipeline_cache.h"
#include "venus-protocol/vn_protocol_driver_pipeline_layout.h"
#include "venus-protocol/vn_protocol_driver_shader_module.h"

#include "vn_device.h"
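
/*
 * Objects created in this file are thin guest-side proxies; the real Vulkan
 * objects live in the host renderer.  Creation and destruction are encoded
 * with the vn_async_* helpers, which queue the command without waiting for a
 * reply; only commands whose result is needed immediately (pipeline cache
 * data retrieval below) use the blocking vn_call_* variants.
 */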

/* shader module commands */

VkResult
vn_CreateShaderModule(VkDevice device,
                      const VkShaderModuleCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkShaderModule *pShaderModule)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_shader_module *mod =
      vk_zalloc(alloc, sizeof(*mod), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!mod)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&mod->base, VK_OBJECT_TYPE_SHADER_MODULE, &dev->base);

   VkShaderModule mod_handle = vn_shader_module_to_handle(mod);
   vn_async_vkCreateShaderModule(dev->instance, device, pCreateInfo, NULL,
                                 &mod_handle);

   *pShaderModule = mod_handle;

   return VK_SUCCESS;
}

void
vn_DestroyShaderModule(VkDevice device,
                       VkShaderModule shaderModule,
                       const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_shader_module *mod = vn_shader_module_from_handle(shaderModule);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!mod)
      return;

   vn_async_vkDestroyShaderModule(dev->instance, device, shaderModule, NULL);

   vn_object_base_fini(&mod->base);
   vk_free(alloc, mod);
}

/* pipeline layout commands */

VkResult
vn_CreatePipelineLayout(VkDevice device,
                        const VkPipelineLayoutCreateInfo *pCreateInfo,
                        const VkAllocationCallbacks *pAllocator,
                        VkPipelineLayout *pPipelineLayout)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_pipeline_layout *layout =
      vk_zalloc(alloc, sizeof(*layout), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!layout)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&layout->base, VK_OBJECT_TYPE_PIPELINE_LAYOUT,
                       &dev->base);

   VkPipelineLayout layout_handle = vn_pipeline_layout_to_handle(layout);
   vn_async_vkCreatePipelineLayout(dev->instance, device, pCreateInfo, NULL,
                                   &layout_handle);

   *pPipelineLayout = layout_handle;

   return VK_SUCCESS;
}

void
vn_DestroyPipelineLayout(VkDevice device,
                         VkPipelineLayout pipelineLayout,
                         const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_pipeline_layout *layout =
      vn_pipeline_layout_from_handle(pipelineLayout);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!layout)
      return;

   vn_async_vkDestroyPipelineLayout(dev->instance, device, pipelineLayout,
                                    NULL);

   vn_object_base_fini(&layout->base);
   vk_free(alloc, layout);
}

/* pipeline cache commands */

VkResult
vn_CreatePipelineCache(VkDevice device,
                       const VkPipelineCacheCreateInfo *pCreateInfo,
                       const VkAllocationCallbacks *pAllocator,
                       VkPipelineCache *pPipelineCache)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_pipeline_cache *cache =
      vk_zalloc(alloc, sizeof(*cache), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cache)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&cache->base, VK_OBJECT_TYPE_PIPELINE_CACHE,
                       &dev->base);
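
   /*
    * vn_GetPipelineCacheData prepends a vk_pipeline_cache_header to the blob
    * it hands back to the application, while the host expects only the raw
    * cache data, so skip past that header before forwarding initial data.
    */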
   VkPipelineCacheCreateInfo local_create_info;
   if (pCreateInfo->initialDataSize) {
      local_create_info = *pCreateInfo;
      local_create_info.pInitialData +=
         sizeof(struct vk_pipeline_cache_header);
      pCreateInfo = &local_create_info;
   }

   VkPipelineCache cache_handle = vn_pipeline_cache_to_handle(cache);
   vn_async_vkCreatePipelineCache(dev->instance, device, pCreateInfo, NULL,
                                  &cache_handle);

   *pPipelineCache = cache_handle;

   return VK_SUCCESS;
}

void
vn_DestroyPipelineCache(VkDevice device,
                        VkPipelineCache pipelineCache,
                        const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_pipeline_cache *cache =
      vn_pipeline_cache_from_handle(pipelineCache);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!cache)
      return;

   vn_async_vkDestroyPipelineCache(dev->instance, device, pipelineCache,
                                   NULL);

   vn_object_base_fini(&cache->base);
   vk_free(alloc, cache);
}
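
/*
 * The data returned to the application is the host's raw cache blob with a
 * vk_pipeline_cache_header describing this physical device prepended, so
 * every size reported to or received from the host is offset by
 * sizeof(struct vk_pipeline_cache_header).
 */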
VkResult
vn_GetPipelineCacheData(VkDevice device,
                        VkPipelineCache pipelineCache,
                        size_t *pDataSize,
                        void *pData)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_physical_device *physical_dev = dev->physical_device;

   struct vk_pipeline_cache_header *header = pData;
   VkResult result;
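   /* size query only: ask the host for its size and add room for the header */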
   if (!pData) {
      result = vn_call_vkGetPipelineCacheData(dev->instance, device,
                                              pipelineCache, pDataSize, NULL);
      if (result != VK_SUCCESS)
         return vn_error(dev->instance, result);

      *pDataSize += sizeof(*header);
      return VK_SUCCESS;
   }

   if (*pDataSize <= sizeof(*header)) {
      *pDataSize = 0;
      return VK_INCOMPLETE;
   }

   const VkPhysicalDeviceProperties *props =
      &physical_dev->properties.properties;
   header->header_size = sizeof(*header);
   header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
   header->vendor_id = props->vendorID;
   header->device_id = props->deviceID;
   memcpy(header->uuid, props->pipelineCacheUUID, VK_UUID_SIZE);

   *pDataSize -= header->header_size;
   result =
      vn_call_vkGetPipelineCacheData(dev->instance, device, pipelineCache,
                                     pDataSize, pData + header->header_size);
   if (result < VK_SUCCESS)
      return vn_error(dev->instance, result);

   *pDataSize += header->header_size;

   return result;
}

VkResult
vn_MergePipelineCaches(VkDevice device,
                       VkPipelineCache dstCache,
                       uint32_t srcCacheCount,
                       const VkPipelineCache *pSrcCaches)
{
   struct vn_device *dev = vn_device_from_handle(device);

   vn_async_vkMergePipelineCaches(dev->instance, device, dstCache,
                                  srcCacheCount, pSrcCaches);

   return VK_SUCCESS;
}

/* pipeline commands */

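/*
 * Pipelines are created in two steps: guest-side proxy objects are allocated
 * for all createInfoCount entries first (rolling everything back and zeroing
 * pPipelines on allocation failure), then a single asynchronous command asks
 * the host to create the whole batch.
 */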
VkResult
vn_CreateGraphicsPipelines(VkDevice device,
                           VkPipelineCache pipelineCache,
                           uint32_t createInfoCount,
                           const VkGraphicsPipelineCreateInfo *pCreateInfos,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipelines)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   for (uint32_t i = 0; i < createInfoCount; i++) {
      struct vn_pipeline *pipeline =
         vk_zalloc(alloc, sizeof(*pipeline), VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!pipeline) {
         for (uint32_t j = 0; j < i; j++)
            vk_free(alloc, vn_pipeline_from_handle(pPipelines[j]));
         memset(pPipelines, 0, sizeof(*pPipelines) * createInfoCount);
         return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      vn_object_base_init(&pipeline->base, VK_OBJECT_TYPE_PIPELINE,
                          &dev->base);

      VkPipeline pipeline_handle = vn_pipeline_to_handle(pipeline);
      pPipelines[i] = pipeline_handle;
   }

   vn_async_vkCreateGraphicsPipelines(dev->instance, device, pipelineCache,
                                      createInfoCount, pCreateInfos, NULL,
                                      pPipelines);

   return VK_SUCCESS;
}

VkResult
vn_CreateComputePipelines(VkDevice device,
                          VkPipelineCache pipelineCache,
                          uint32_t createInfoCount,
                          const VkComputePipelineCreateInfo *pCreateInfos,
                          const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   for (uint32_t i = 0; i < createInfoCount; i++) {
      struct vn_pipeline *pipeline =
         vk_zalloc(alloc, sizeof(*pipeline), VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!pipeline) {
         for (uint32_t j = 0; j < i; j++)
            vk_free(alloc, vn_pipeline_from_handle(pPipelines[j]));
         memset(pPipelines, 0, sizeof(*pPipelines) * createInfoCount);
         return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      vn_object_base_init(&pipeline->base, VK_OBJECT_TYPE_PIPELINE,
                          &dev->base);

      VkPipeline pipeline_handle = vn_pipeline_to_handle(pipeline);
      pPipelines[i] = pipeline_handle;
   }

   vn_async_vkCreateComputePipelines(dev->instance, device, pipelineCache,
                                     createInfoCount, pCreateInfos, NULL,
                                     pPipelines);

   return VK_SUCCESS;
}

void
vn_DestroyPipeline(VkDevice device,
                   VkPipeline _pipeline,
                   const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_pipeline *pipeline = vn_pipeline_from_handle(_pipeline);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!pipeline)
      return;

   vn_async_vkDestroyPipeline(dev->instance, device, _pipeline, NULL);

   vn_object_base_fini(&pipeline->base);
   vk_free(alloc, pipeline);
}