Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/zink/zink_resource.c
4570 views
1
/*
2
* Copyright 2018 Collabora Ltd.
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* on the rights to use, copy, modify, merge, publish, distribute, sub
8
* license, and/or sell copies of the Software, and to permit persons to whom
9
* the Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21
* USE OR OTHER DEALINGS IN THE SOFTWARE.
22
*/
23
24
#include "zink_resource.h"
25
26
#include "zink_batch.h"
27
#include "zink_context.h"
28
#include "zink_fence.h"
29
#include "zink_program.h"
30
#include "zink_screen.h"
31
32
#ifdef VK_USE_PLATFORM_METAL_EXT
33
#include "QuartzCore/CAMetalLayer.h"
34
#endif
35
#include "vulkan/wsi/wsi_common.h"
36
37
#include "util/slab.h"
38
#include "util/u_blitter.h"
39
#include "util/u_debug.h"
40
#include "util/format/u_format.h"
41
#include "util/u_transfer_helper.h"
42
#include "util/u_inlines.h"
43
#include "util/u_memory.h"
44
#include "util/u_upload_mgr.h"
45
46
#include "frontend/sw_winsys.h"
47
48
#ifndef _WIN32
49
#define ZINK_USE_DMABUF
50
#endif
51
52
#ifdef ZINK_USE_DMABUF
53
#include "drm-uapi/drm_fourcc.h"
54
#else
55
/* these won't actually be used */
56
#define DRM_FORMAT_MOD_INVALID 0
57
#define DRM_FORMAT_MOD_LINEAR 0
58
#endif
59
60
static void
61
zink_transfer_flush_region(struct pipe_context *pctx,
62
struct pipe_transfer *ptrans,
63
const struct pipe_box *box);
64
static void *
65
zink_transfer_map(struct pipe_context *pctx,
66
struct pipe_resource *pres,
67
unsigned level,
68
unsigned usage,
69
const struct pipe_box *box,
70
struct pipe_transfer **transfer);
71
static void
72
zink_transfer_unmap(struct pipe_context *pctx,
73
struct pipe_transfer *ptrans);
74
75
void
debug_describe_zink_resource_object(char *buf, const struct zink_resource_object *ptr)
{
   /* Emit a fixed identifying tag for debug output; the object pointer is
    * intentionally unused.
    */
   sprintf(buf, "%s", "zink_resource_object");
}
80
81
static uint32_t
82
get_resource_usage(struct zink_resource *res)
83
{
84
bool reads = zink_batch_usage_exists(res->obj->reads);
85
bool writes = zink_batch_usage_exists(res->obj->writes);
86
uint32_t batch_uses = 0;
87
if (reads)
88
batch_uses |= ZINK_RESOURCE_ACCESS_READ;
89
if (writes)
90
batch_uses |= ZINK_RESOURCE_ACCESS_WRITE;
91
return batch_uses;
92
}
93
94
static uint32_t
95
mem_hash(const void *key)
96
{
97
const struct mem_key *mkey = key;
98
return _mesa_hash_data(&mkey->key, sizeof(mkey->key));
99
}
100
101
static bool
102
mem_equals(const void *a, const void *b)
103
{
104
const struct mem_key *ma = a;
105
const struct mem_key *mb = b;
106
return !memcmp(&ma->key, &mb->key, sizeof(ma->key));
107
}
108
109
static void
cache_or_free_mem(struct zink_screen *screen, struct zink_resource_object *obj)
{
   /* Recycle this object's device memory into the screen's allocation cache,
    * or hand it back to the driver when the object opted out of caching
    * (heap_index == UINT32_MAX, set in resource_object_create).
    */
   if (obj->mkey.key.heap_index != UINT32_MAX) {
      simple_mtx_lock(&screen->mem_cache_mtx);
      /* the cache entry was created when this object was allocated, so it
       * must still exist here
       */
      struct hash_entry *he = _mesa_hash_table_search_pre_hashed(screen->resource_mem_cache, obj->mem_hash, &obj->mkey);
      assert(he);
      struct util_dynarray *array = he->data;
      struct mem_key *mkey = (void*)he->key;

      /* seen_count tracks how many live objects share this key; only cache
       * as many entries as there are potential future reusers
       */
      unsigned seen = mkey->seen_count;
      mkey->seen_count--;
      if (util_dynarray_num_elements(array, struct mem_cache_entry) < seen) {
         struct mem_cache_entry mc = { obj->mem, obj->map };
         screen->mem_cache_size += obj->size;
         /* on 32-bit builds, keeping persistent maps around exhausts address
          * space, so unmap before caching
          */
         if (sizeof(void*) == 4 && obj->map) {
            vkUnmapMemory(screen->dev, obj->mem);
            mc.map = NULL;
         }
         util_dynarray_append(array, struct mem_cache_entry, mc);
         simple_mtx_unlock(&screen->mem_cache_mtx);
         return;
      }
      simple_mtx_unlock(&screen->mem_cache_mtx);
   }
   /* not cacheable (or cache already full enough): release to the driver */
   vkFreeMemory(screen->dev, obj->mem, NULL);
}
136
137
void
zink_destroy_resource_object(struct zink_screen *screen, struct zink_resource_object *obj)
{
   /* Destroy the Vulkan handle(s) backing this object, then recycle or free
    * its memory. The caller must guarantee no GPU work still uses it.
    */
   if (obj->is_buffer) {
      if (obj->sbuffer)
         vkDestroyBuffer(screen->dev, obj->sbuffer, NULL);
      vkDestroyBuffer(screen->dev, obj->buffer, NULL);
   } else {
      vkDestroyImage(screen->dev, obj->image, NULL);
   }

   /* drop any descriptor-set references to this object before the memory
    * goes away
    */
   zink_descriptor_set_refs_clear(&obj->desc_set_refs, obj);
   cache_or_free_mem(screen, obj);
   FREE(obj);
}
152
153
static void
zink_resource_destroy(struct pipe_screen *pscreen,
                      struct pipe_resource *pres)
{
   /* pipe_screen::resource_destroy: tear down per-resource state and drop
    * the references to the backing object(s).
    */
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_resource *res = zink_resource(pres);
   if (pres->target == PIPE_BUFFER) {
      util_range_destroy(&res->valid_buffer_range);
      util_idalloc_mt_free(&screen->buffer_ids, res->base.buffer_id_unique);
   }

   /* unref both the primary object and the linear scanout copy (if any);
    * the objects are actually destroyed when their refcounts hit zero
    */
   zink_resource_object_reference(screen, &res->obj, NULL);
   zink_resource_object_reference(screen, &res->scanout_obj, NULL);
   threaded_resource_deinit(pres);
   FREE(res);
}
169
170
static uint32_t
get_memory_type_index(struct zink_screen *screen,
                      const VkMemoryRequirements *reqs,
                      VkMemoryPropertyFlags props)
{
   /* Select a device memory type allowed by reqs->memoryTypeBits that has at
    * least the requested property flags. When the caller did not ask for
    * cached memory, types carrying the cached bit are only used as a last
    * resort; when cached memory was requested but none exists, retry without
    * the cached bit.
    */
   int32_t idx = -1;
   for (uint32_t i = 0u; i < VK_MAX_MEMORY_TYPES; i++) {
      if (((reqs->memoryTypeBits >> i) & 1) == 1) {
         if ((screen->info.mem_props.memoryTypes[i].propertyFlags & props) == props) {
            /* unwanted cached bit: remember as fallback, keep scanning for a
             * non-cached match
             */
            if (!(props & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) &&
                screen->info.mem_props.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) {
               idx = i;
            } else
               return i;
         }
      }
   }
   if (idx >= 0)
      return idx;

   if (props & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) {
      /* if no suitable cached memory can be found, fall back
       * to non-cached memory instead.
       */
      return get_memory_type_index(screen, reqs,
                                   props & ~VK_MEMORY_PROPERTY_HOST_CACHED_BIT);
   }

   unreachable("Unsupported memory-type");
   return 0;
}
201
202
static VkImageAspectFlags
203
aspect_from_format(enum pipe_format fmt)
204
{
205
if (util_format_is_depth_or_stencil(fmt)) {
206
VkImageAspectFlags aspect = 0;
207
const struct util_format_description *desc = util_format_description(fmt);
208
if (util_format_has_depth(desc))
209
aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
210
if (util_format_has_stencil(desc))
211
aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;
212
return aspect;
213
} else
214
return VK_IMAGE_ASPECT_COLOR_BIT;
215
}
216
217
static VkBufferCreateInfo
218
create_bci(struct zink_screen *screen, const struct pipe_resource *templ, unsigned bind)
219
{
220
VkBufferCreateInfo bci = {0};
221
bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
222
bci.size = templ->width0;
223
assert(bci.size > 0);
224
225
bci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
226
VK_BUFFER_USAGE_TRANSFER_DST_BIT |
227
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
228
229
bci.usage |= VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
230
VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT |
231
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
232
VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
233
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
234
VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT |
235
VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT;
236
237
if (bind & PIPE_BIND_SHADER_IMAGE)
238
bci.usage |= VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
239
240
if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
241
bci.flags |= VK_BUFFER_CREATE_SPARSE_BINDING_BIT;
242
return bci;
243
}
244
245
static bool
check_ici(struct zink_screen *screen, VkImageCreateInfo *ici, uint64_t modifier)
{
   /* Ask the physical device whether an image with these creation parameters
    * (optionally with the given DRM format modifier) can actually be created.
    */
   VkImageFormatProperties image_props;
   VkResult ret;
   /* a real modifier requires both the properties2 entrypoint and
    * EXT_image_drm_format_modifier
    */
   assert(modifier == DRM_FORMAT_MOD_INVALID ||
          (screen->vk.GetPhysicalDeviceImageFormatProperties2 && screen->info.have_EXT_image_drm_format_modifier));
   if (screen->vk.GetPhysicalDeviceImageFormatProperties2) {
      VkImageFormatProperties2 props2 = {0};
      props2.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
      VkPhysicalDeviceImageFormatInfo2 info = {0};
      info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
      info.format = ici->format;
      info.type = ici->imageType;
      info.tiling = ici->tiling;
      info.usage = ici->usage;
      info.flags = ici->flags;

      /* chain in the modifier query only when a concrete modifier was given */
      VkPhysicalDeviceImageDrmFormatModifierInfoEXT mod_info;
      if (modifier != DRM_FORMAT_MOD_INVALID) {
         mod_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT;
         mod_info.pNext = NULL;
         mod_info.drmFormatModifier = modifier;
         mod_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
         mod_info.queueFamilyIndexCount = 0;
         info.pNext = &mod_info;
      }
      ret = screen->vk.GetPhysicalDeviceImageFormatProperties2(screen->pdev, &info, &props2);
      image_props = props2.imageFormatProperties;
   } else
      /* legacy path: no modifier support possible here */
      ret = vkGetPhysicalDeviceImageFormatProperties(screen->pdev, ici->format, ici->imageType,
                                                     ici->tiling, ici->usage, ici->flags, &image_props);
   return ret == VK_SUCCESS;
}
279
280
static VkImageUsageFlags
get_image_usage_for_feats(struct zink_screen *screen, VkFormatFeatureFlags feats, const struct pipe_resource *templ, unsigned bind)
{
   /* Derive the VkImageUsageFlags for an image from the format's feature
    * flags and the gallium bind flags. Returns 0 when a required bind
    * (render target / depth-stencil / sampler-view attachment) cannot be
    * satisfied by this format's features.
    */
   VkImageUsageFlags usage = 0;
   /* sadly, gallium doesn't let us know if it'll ever need this, so we have to assume */
   if (feats & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT)
      usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
   if (feats & VK_FORMAT_FEATURE_TRANSFER_DST_BIT)
      usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
   /* skip SAMPLED for shared+linear images (wsi-style scanout surfaces) */
   if (feats & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT && (bind & (PIPE_BIND_LINEAR | PIPE_BIND_SHARED)) != (PIPE_BIND_LINEAR | PIPE_BIND_SHARED))
      usage |= VK_IMAGE_USAGE_SAMPLED_BIT;

   /* multisampled storage images need explicit feature support */
   if ((templ->nr_samples <= 1 || screen->info.feats.features.shaderStorageImageMultisample) &&
       (bind & PIPE_BIND_SHADER_IMAGE)) {
      if (feats & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)
         usage |= VK_IMAGE_USAGE_STORAGE_BIT;
   }

   if (bind & PIPE_BIND_RENDER_TARGET) {
      if (feats & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)
         usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
      else
         return 0;
   }

   if (bind & PIPE_BIND_DEPTH_STENCIL) {
      if (feats & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)
         usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
      else
         return 0;
   /* this is unlikely to occur and has been included for completeness */
   } else if (bind & PIPE_BIND_SAMPLER_VIEW && !(usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
      if (feats & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)
         usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
      else
         return 0;
   }

   if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
      usage |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;

   if (bind & PIPE_BIND_STREAM_OUTPUT)
      usage |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
   return usage;
}
325
326
static VkFormatFeatureFlags
327
find_modifier_feats(const struct zink_modifier_prop *prop, uint64_t modifier, uint64_t *mod)
328
{
329
for (unsigned j = 0; j < prop->drmFormatModifierCount; j++) {
330
if (prop->pDrmFormatModifierProperties[j].drmFormatModifier == modifier) {
331
*mod = modifier;
332
return prop->pDrmFormatModifierProperties[j].drmFormatModifierTilingFeatures;
333
}
334
}
335
return 0;
336
}
337
338
static VkImageUsageFlags
get_image_usage(struct zink_screen *screen, VkImageCreateInfo *ici, const struct pipe_resource *templ, unsigned bind, unsigned modifiers_count, const uint64_t *modifiers, uint64_t *mod)
{
   /* Pick a workable (usage, modifier) combination for this image. With a
    * modifier list, try each non-linear modifier in order and verify it via
    * check_ici; without modifiers, derive usage from the plain format
    * features for the current tiling. On success ici->usage is set and the
    * usage is returned; on failure *mod is DRM_FORMAT_MOD_INVALID and 0 is
    * returned.
    */
   VkImageTiling tiling = ici->tiling;
   *mod = DRM_FORMAT_MOD_INVALID;
   if (modifiers_count) {
      bool have_linear = false;
      const struct zink_modifier_prop *prop = &screen->modifier_props[templ->format];
      assert(tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT);
      for (unsigned i = 0; i < modifiers_count; i++) {
         if (modifiers[i] == DRM_FORMAT_MOD_LINEAR) {
            have_linear = true;
            continue;
         }
         VkFormatFeatureFlags feats = find_modifier_feats(prop, modifiers[i], mod);
         if (feats) {
            VkImageUsageFlags usage = get_image_usage_for_feats(screen, feats, templ, bind);
            if (usage) {
               ici->usage = usage;
               if (check_ici(screen, ici, *mod))
                  return usage;
            }
         }
      }
      /* only try linear if no other options available */
      if (have_linear) {
         VkFormatFeatureFlags feats = find_modifier_feats(prop, DRM_FORMAT_MOD_LINEAR, mod);
         if (feats) {
            VkImageUsageFlags usage = get_image_usage_for_feats(screen, feats, templ, bind);
            if (usage) {
               ici->usage = usage;
               if (check_ici(screen, ici, *mod))
                  return usage;
            }
         }
      }
   } else {
      /* no modifiers: usage comes straight from the format's tiling features */
      VkFormatProperties props = screen->format_props[templ->format];
      VkFormatFeatureFlags feats = tiling == VK_IMAGE_TILING_LINEAR ? props.linearTilingFeatures : props.optimalTilingFeatures;
      VkImageUsageFlags usage = get_image_usage_for_feats(screen, feats, templ, bind);
      if (usage) {
         ici->usage = usage;
         if (check_ici(screen, ici, *mod))
            return usage;
      }
   }
   *mod = DRM_FORMAT_MOD_INVALID;
   return 0;
}
388
389
static uint64_t
create_ici(struct zink_screen *screen, VkImageCreateInfo *ici, const struct pipe_resource *templ, unsigned bind, unsigned modifiers_count, const uint64_t *modifiers, bool *success)
{
   /* Fill in a VkImageCreateInfo from the gallium template, then iterate
    * tiling modes (modifier -> optimal -> linear) until a usable usage mask
    * is found. Returns the chosen DRM modifier (or DRM_FORMAT_MOD_INVALID)
    * and reports overall success through *success.
    */
   ici->sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
   /* mutable format is the default, but not for scanout/depth-stencil images */
   ici->flags = bind & (PIPE_BIND_SCANOUT | PIPE_BIND_DEPTH_STENCIL) ? 0 : VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;

   switch (templ->target) {
   case PIPE_TEXTURE_1D:
   case PIPE_TEXTURE_1D_ARRAY:
      ici->imageType = VK_IMAGE_TYPE_1D;
      break;

   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_CUBE_ARRAY:
      ici->flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
      FALLTHROUGH;
   case PIPE_TEXTURE_2D:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_RECT:
      ici->imageType = VK_IMAGE_TYPE_2D;
      break;

   case PIPE_TEXTURE_3D:
      ici->imageType = VK_IMAGE_TYPE_3D;
      /* allow rendering to individual 3D slices */
      if (bind & PIPE_BIND_RENDER_TARGET)
         ici->flags |= VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT;
      break;

   case PIPE_BUFFER:
      unreachable("PIPE_BUFFER should already be handled");

   default:
      unreachable("Unknown target");
   }

   if (screen->info.have_EXT_sample_locations &&
       bind & PIPE_BIND_DEPTH_STENCIL &&
       util_format_has_depth(util_format_description(templ->format)))
      ici->flags |= VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT;

   ici->format = zink_get_format(screen, templ->format);
   ici->extent.width = templ->width0;
   ici->extent.height = templ->height0;
   ici->extent.depth = templ->depth0;
   ici->mipLevels = templ->last_level + 1;
   ici->arrayLayers = MAX2(templ->array_size, 1);
   ici->samples = templ->nr_samples ? templ->nr_samples : VK_SAMPLE_COUNT_1_BIT;
   ici->tiling = modifiers_count ? VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT : bind & PIPE_BIND_LINEAR ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
   ici->sharingMode = VK_SHARING_MODE_EXCLUSIVE;
   ici->initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

   /* opportunistically mark square >=6-layer arrays (and cubes) as
    * cube-compatible when the implementation permits it
    */
   if (templ->target == PIPE_TEXTURE_CUBE ||
       templ->target == PIPE_TEXTURE_CUBE_ARRAY ||
       (templ->target == PIPE_TEXTURE_2D_ARRAY &&
        ici->extent.width == ici->extent.height &&
        ici->arrayLayers >= 6)) {
      VkImageFormatProperties props;
      if (vkGetPhysicalDeviceImageFormatProperties(screen->pdev, ici->format,
                                                   ici->imageType, ici->tiling,
                                                   ici->usage, ici->flags |
                                                   VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT,
                                                   &props) == VK_SUCCESS) {
         if (props.sampleCounts & ici->samples)
            ici->flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
      }
   }

   /* gallium counts cube faces as one layer; Vulkan wants 6 per cube */
   if (templ->target == PIPE_TEXTURE_CUBE)
      ici->arrayLayers *= 6;

   if (templ->usage == PIPE_USAGE_STAGING &&
       templ->format != PIPE_FORMAT_B4G4R4A4_UNORM &&
       templ->format != PIPE_FORMAT_B4G4R4A4_UINT)
      ici->tiling = VK_IMAGE_TILING_LINEAR;

   /* tiling fallback loop: on failure, step modifier -> optimal -> linear
    * (and linear -> optimal unless linear was explicitly requested);
    * tried[] prevents retrying a tiling that already failed
    */
   bool first = true;
   bool tried[2] = {0};
   uint64_t mod = DRM_FORMAT_MOD_INVALID;
   while (!ici->usage) {
      if (!first) {
         switch (ici->tiling) {
         case VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT:
            ici->tiling = VK_IMAGE_TILING_OPTIMAL;
            modifiers_count = 0;
            break;
         case VK_IMAGE_TILING_OPTIMAL:
            ici->tiling = VK_IMAGE_TILING_LINEAR;
            break;
         case VK_IMAGE_TILING_LINEAR:
            if (bind & PIPE_BIND_LINEAR) {
               *success = false;
               return DRM_FORMAT_MOD_INVALID;
            }
            ici->tiling = VK_IMAGE_TILING_OPTIMAL;
            break;
         default:
            unreachable("unhandled tiling mode");
         }
         if (tried[ici->tiling]) {
            *success = false;
            return DRM_FORMAT_MOD_INVALID;
         }
      }
      ici->usage = get_image_usage(screen, ici, templ, bind, modifiers_count, modifiers, &mod);
      first = false;
      if (ici->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT)
         tried[ici->tiling] = true;
   }

   *success = true;
   return mod;
}
501
502
static struct zink_resource_object *
503
resource_object_create(struct zink_screen *screen, const struct pipe_resource *templ, struct winsys_handle *whandle, bool *optimal_tiling,
504
const uint64_t *modifiers, int modifiers_count)
505
{
506
struct zink_resource_object *obj = CALLOC_STRUCT(zink_resource_object);
507
if (!obj)
508
return NULL;
509
510
VkMemoryRequirements reqs = {0};
511
VkMemoryPropertyFlags flags;
512
/* TODO: remove linear for wsi */
513
bool scanout = (templ->bind & (PIPE_BIND_SCANOUT | PIPE_BIND_LINEAR)) == (PIPE_BIND_SCANOUT | PIPE_BIND_LINEAR);
514
bool shared = (templ->bind & (PIPE_BIND_SHARED | PIPE_BIND_LINEAR)) == (PIPE_BIND_SHARED | PIPE_BIND_LINEAR);
515
516
pipe_reference_init(&obj->reference, 1);
517
util_dynarray_init(&obj->desc_set_refs.refs, NULL);
518
if (templ->target == PIPE_BUFFER) {
519
VkBufferCreateInfo bci = create_bci(screen, templ, templ->bind);
520
521
if (vkCreateBuffer(screen->dev, &bci, NULL, &obj->buffer) != VK_SUCCESS) {
522
debug_printf("vkCreateBuffer failed\n");
523
goto fail1;
524
}
525
526
vkGetBufferMemoryRequirements(screen->dev, obj->buffer, &reqs);
527
if (templ->usage == PIPE_USAGE_STAGING)
528
flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
529
else if (templ->usage == PIPE_USAGE_STREAM)
530
flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
531
else if (templ->usage == PIPE_USAGE_IMMUTABLE)
532
flags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
533
else
534
flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
535
obj->is_buffer = true;
536
obj->transfer_dst = true;
537
} else {
538
bool winsys_modifier = shared && whandle && whandle->modifier != DRM_FORMAT_MOD_INVALID;
539
const uint64_t *ici_modifiers = winsys_modifier ? &whandle->modifier : modifiers;
540
unsigned ici_modifier_count = winsys_modifier ? 1 : modifiers_count;
541
bool success = false;
542
VkImageCreateInfo ici = {0};
543
uint64_t mod = create_ici(screen, &ici, templ, templ->bind, ici_modifier_count, ici_modifiers, &success);
544
VkExternalMemoryImageCreateInfo emici = {0};
545
VkImageDrmFormatModifierExplicitCreateInfoEXT idfmeci = {0};
546
VkImageDrmFormatModifierListCreateInfoEXT idfmlci;
547
if (!success)
548
goto fail1;
549
550
if (shared) {
551
emici.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
552
emici.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
553
ici.pNext = &emici;
554
555
assert(ici.tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT || mod != DRM_FORMAT_MOD_INVALID);
556
if (winsys_modifier && ici.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
557
assert(mod == whandle->modifier);
558
idfmeci.sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT;
559
idfmeci.pNext = ici.pNext;
560
idfmeci.drmFormatModifier = mod;
561
562
/* TODO: store these values from other planes in their
563
* respective zink_resource, and walk the next-pointers to
564
* build up the planar array here instead.
565
*/
566
assert(util_format_get_num_planes(templ->format) == 1);
567
idfmeci.drmFormatModifierPlaneCount = 1;
568
VkSubresourceLayout plane_layout = {
569
.offset = whandle->offset,
570
.size = 0,
571
.rowPitch = whandle->stride,
572
.arrayPitch = 0,
573
.depthPitch = 0,
574
};
575
idfmeci.pPlaneLayouts = &plane_layout;
576
577
ici.pNext = &idfmeci;
578
} else if (ici.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
579
idfmlci.sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT;
580
idfmlci.pNext = ici.pNext;
581
idfmlci.drmFormatModifierCount = 1;
582
idfmlci.pDrmFormatModifiers = &mod;
583
ici.pNext = &idfmlci;
584
} else if (ici.tiling == VK_IMAGE_TILING_OPTIMAL) {
585
// TODO: remove for wsi
586
ici.pNext = NULL;
587
scanout = false;
588
shared = false;
589
}
590
}
591
592
if (optimal_tiling)
593
*optimal_tiling = ici.tiling == VK_IMAGE_TILING_OPTIMAL;
594
595
if (ici.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)
596
obj->transfer_dst = true;
597
598
if (ici.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT)
599
obj->modifier_aspect = VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT;
600
601
struct wsi_image_create_info image_wsi_info = {
602
VK_STRUCTURE_TYPE_WSI_IMAGE_CREATE_INFO_MESA,
603
NULL,
604
.scanout = true,
605
};
606
607
if ((screen->needs_mesa_wsi || screen->needs_mesa_flush_wsi) && scanout) {
608
image_wsi_info.pNext = ici.pNext;
609
ici.pNext = &image_wsi_info;
610
}
611
612
VkResult result = vkCreateImage(screen->dev, &ici, NULL, &obj->image);
613
if (result != VK_SUCCESS) {
614
debug_printf("vkCreateImage failed\n");
615
goto fail1;
616
}
617
618
vkGetImageMemoryRequirements(screen->dev, obj->image, &reqs);
619
if (templ->usage == PIPE_USAGE_STAGING && ici.tiling == VK_IMAGE_TILING_LINEAR)
620
flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
621
else
622
flags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
623
}
624
625
if (templ->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT || templ->usage == PIPE_USAGE_DYNAMIC)
626
flags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
627
else if (!(flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) &&
628
templ->usage == PIPE_USAGE_STAGING)
629
flags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
630
631
VkMemoryAllocateInfo mai = {0};
632
mai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
633
mai.allocationSize = reqs.size;
634
mai.memoryTypeIndex = get_memory_type_index(screen, &reqs, flags);
635
636
VkMemoryType mem_type = screen->info.mem_props.memoryTypes[mai.memoryTypeIndex];
637
obj->coherent = mem_type.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
638
if (!(templ->flags & PIPE_RESOURCE_FLAG_SPARSE))
639
obj->host_visible = mem_type.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
640
if (templ->target == PIPE_BUFFER && !obj->coherent && obj->host_visible) {
641
mai.allocationSize = reqs.size = align(reqs.size, screen->info.props.limits.nonCoherentAtomSize);
642
}
643
644
VkExportMemoryAllocateInfo emai = {0};
645
if (templ->bind & PIPE_BIND_SHARED && shared) {
646
emai.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO;
647
emai.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
648
649
emai.pNext = mai.pNext;
650
mai.pNext = &emai;
651
}
652
653
VkImportMemoryFdInfoKHR imfi = {
654
VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
655
NULL,
656
};
657
658
if (whandle && whandle->type == WINSYS_HANDLE_TYPE_FD) {
659
imfi.pNext = NULL;
660
imfi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
661
imfi.fd = whandle->handle;
662
663
imfi.pNext = mai.pNext;
664
emai.pNext = &imfi;
665
}
666
667
struct wsi_memory_allocate_info memory_wsi_info = {
668
VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA,
669
NULL,
670
};
671
672
if (screen->needs_mesa_wsi && scanout) {
673
memory_wsi_info.implicit_sync = true;
674
675
memory_wsi_info.pNext = mai.pNext;
676
mai.pNext = &memory_wsi_info;
677
}
678
679
if (!mai.pNext && !(templ->flags & (PIPE_RESOURCE_FLAG_MAP_COHERENT | PIPE_RESOURCE_FLAG_SPARSE))) {
680
obj->mkey.key.reqs = reqs;
681
obj->mkey.key.heap_index = mai.memoryTypeIndex;
682
obj->mem_hash = mem_hash(&obj->mkey);
683
simple_mtx_lock(&screen->mem_cache_mtx);
684
685
struct hash_entry *he = _mesa_hash_table_search_pre_hashed(screen->resource_mem_cache, obj->mem_hash, &obj->mkey);
686
struct mem_key *mkey;
687
if (he) {
688
struct util_dynarray *array = he->data;
689
mkey = (void*)he->key;
690
if (array && util_dynarray_num_elements(array, struct mem_cache_entry)) {
691
struct mem_cache_entry mc = util_dynarray_pop(array, struct mem_cache_entry);
692
obj->mem = mc.mem;
693
obj->map = mc.map;
694
screen->mem_cache_size -= reqs.size;
695
screen->mem_cache_count--;
696
}
697
} else {
698
mkey = ralloc(screen->resource_mem_cache, struct mem_key);
699
memcpy(&mkey->key, &obj->mkey.key, sizeof(obj->mkey.key));
700
mkey->seen_count = 0;
701
struct util_dynarray *array = rzalloc(screen->resource_mem_cache, struct util_dynarray);
702
util_dynarray_init(array, screen->resource_mem_cache);
703
_mesa_hash_table_insert_pre_hashed(screen->resource_mem_cache, obj->mem_hash, mkey, array);
704
}
705
mkey->seen_count++;
706
simple_mtx_unlock(&screen->mem_cache_mtx);
707
} else
708
obj->mkey.key.heap_index = UINT32_MAX;
709
710
/* TODO: sparse buffers should probably allocate multiple regions of memory instead of giant blobs? */
711
if (!obj->mem && vkAllocateMemory(screen->dev, &mai, NULL, &obj->mem) != VK_SUCCESS) {
712
debug_printf("vkAllocateMemory failed\n");
713
goto fail2;
714
}
715
716
obj->offset = 0;
717
obj->size = reqs.size;
718
719
if (templ->target == PIPE_BUFFER) {
720
if (!(templ->flags & PIPE_RESOURCE_FLAG_SPARSE))
721
if (vkBindBufferMemory(screen->dev, obj->buffer, obj->mem, obj->offset) != VK_SUCCESS)
722
goto fail3;
723
} else {
724
if (vkBindImageMemory(screen->dev, obj->image, obj->mem, obj->offset) != VK_SUCCESS)
725
goto fail3;
726
}
727
return obj;
728
729
fail3:
730
vkFreeMemory(screen->dev, obj->mem, NULL);
731
732
fail2:
733
if (templ->target == PIPE_BUFFER)
734
vkDestroyBuffer(screen->dev, obj->buffer, NULL);
735
else
736
vkDestroyImage(screen->dev, obj->image, NULL);
737
fail1:
738
FREE(obj);
739
return NULL;
740
}
741
742
static struct pipe_resource *
743
resource_create(struct pipe_screen *pscreen,
744
const struct pipe_resource *templ,
745
struct winsys_handle *whandle,
746
unsigned external_usage,
747
const uint64_t *modifiers, int modifiers_count)
748
{
749
struct zink_screen *screen = zink_screen(pscreen);
750
struct zink_resource *res = CALLOC_STRUCT(zink_resource);
751
752
if (modifiers_count > 0) {
753
/* for rebinds */
754
res->modifiers_count = modifiers_count;
755
res->modifiers = mem_dup(modifiers, modifiers_count * sizeof(uint64_t));
756
if (!res->modifiers) {
757
FREE(res);
758
return NULL;
759
}
760
}
761
762
res->base.b = *templ;
763
764
threaded_resource_init(&res->base.b);
765
pipe_reference_init(&res->base.b.reference, 1);
766
res->base.b.screen = pscreen;
767
768
bool optimal_tiling = false;
769
res->obj = resource_object_create(screen, templ, whandle, &optimal_tiling, modifiers, 0);
770
if (!res->obj) {
771
free(res->modifiers);
772
FREE(res);
773
return NULL;
774
}
775
776
res->internal_format = templ->format;
777
if (templ->target == PIPE_BUFFER) {
778
util_range_init(&res->valid_buffer_range);
779
} else {
780
res->format = zink_get_format(screen, templ->format);
781
res->layout = VK_IMAGE_LAYOUT_UNDEFINED;
782
res->optimal_tiling = optimal_tiling;
783
res->aspect = aspect_from_format(templ->format);
784
if (res->base.b.bind & (PIPE_BIND_SCANOUT | PIPE_BIND_SHARED) && optimal_tiling) {
785
// TODO: remove for wsi
786
struct pipe_resource templ2 = res->base.b;
787
templ2.bind = (res->base.b.bind & (PIPE_BIND_SCANOUT | PIPE_BIND_SHARED)) | PIPE_BIND_LINEAR;
788
res->scanout_obj = resource_object_create(screen, &templ2, whandle, &optimal_tiling, modifiers, modifiers_count);
789
assert(!optimal_tiling);
790
}
791
}
792
793
if (screen->winsys && (templ->bind & PIPE_BIND_DISPLAY_TARGET)) {
794
struct sw_winsys *winsys = screen->winsys;
795
res->dt = winsys->displaytarget_create(screen->winsys,
796
res->base.b.bind,
797
res->base.b.format,
798
templ->width0,
799
templ->height0,
800
64, NULL,
801
&res->dt_stride);
802
}
803
if (res->obj->is_buffer)
804
res->base.buffer_id_unique = util_idalloc_mt_alloc(&screen->buffer_ids);
805
806
return &res->base.b;
807
}
808
809
static struct pipe_resource *
zink_resource_create(struct pipe_screen *pscreen,
                     const struct pipe_resource *templ)
{
   /* pipe_screen::resource_create: plain creation with no winsys handle and
    * no DRM modifiers.
    */
   return resource_create(pscreen, templ, NULL, 0, NULL, 0);
}
815
816
static struct pipe_resource *
zink_resource_create_with_modifiers(struct pipe_screen *pscreen, const struct pipe_resource *templ,
                                    const uint64_t *modifiers, int modifiers_count)
{
   /* pipe_screen::resource_create_with_modifiers: creation restricted to the
    * caller-provided list of acceptable DRM format modifiers.
    */
   return resource_create(pscreen, templ, NULL, 0, modifiers, modifiers_count);
}
822
823
static bool
zink_resource_get_param(struct pipe_screen *pscreen, struct pipe_context *pctx,
                        struct pipe_resource *pres,
                        unsigned plane,
                        unsigned layer,
                        unsigned level,
                        enum pipe_resource_param param,
                        unsigned handle_usage,
                        uint64_t *value)
{
   /* pipe_screen::resource_get_param: report layout/handle properties of a
    * resource through *value. Returns false when the query cannot be
    * answered (missing extension, handle export failure).
    */
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_resource *res = zink_resource(pres);
   //TODO: remove for wsi
   /* queries apply to the linear scanout copy when one exists */
   struct zink_resource_object *obj = res->scanout_obj ? res->scanout_obj : res->obj;
   VkImageAspectFlags aspect = obj->modifier_aspect ? obj->modifier_aspect : res->aspect;
   struct winsys_handle whandle;
   switch (param) {
   case PIPE_RESOURCE_PARAM_NPLANES:
      /* not yet implemented */
      *value = 1;
      break;

   case PIPE_RESOURCE_PARAM_STRIDE: {
      /* row pitch of the base subresource */
      VkImageSubresource sub_res = {0};
      VkSubresourceLayout sub_res_layout = {0};

      sub_res.aspectMask = aspect;

      vkGetImageSubresourceLayout(screen->dev, obj->image, &sub_res, &sub_res_layout);

      *value = sub_res_layout.rowPitch;
      break;
   }

   case PIPE_RESOURCE_PARAM_OFFSET: {
      VkImageSubresource isr = {
         aspect,
         level,
         layer
      };
      VkSubresourceLayout srl;
      vkGetImageSubresourceLayout(screen->dev, obj->image, &isr, &srl);
      *value = srl.offset;
      break;
   }

   case PIPE_RESOURCE_PARAM_MODIFIER: {
      *value = DRM_FORMAT_MOD_INVALID;
      if (!screen->info.have_EXT_image_drm_format_modifier)
         return false;
      VkImageDrmFormatModifierPropertiesEXT prop;
      prop.sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT;
      prop.pNext = NULL;
      if (screen->vk.GetImageDrmFormatModifierPropertiesEXT(screen->dev, obj->image, &prop) == VK_SUCCESS)
         *value = prop.drmFormatModifier;
      break;
   }

   case PIPE_RESOURCE_PARAM_LAYER_STRIDE: {
      VkImageSubresource isr = {
         aspect,
         level,
         layer
      };
      VkSubresourceLayout srl;
      vkGetImageSubresourceLayout(screen->dev, obj->image, &isr, &srl);
      /* 3D images advance by depth slice, arrays by array layer */
      if (res->base.b.target == PIPE_TEXTURE_3D)
         *value = srl.depthPitch;
      else
         *value = srl.arrayPitch;
      break;
   }

   case PIPE_RESOURCE_PARAM_HANDLE_TYPE_SHARED:
   case PIPE_RESOURCE_PARAM_HANDLE_TYPE_KMS:
   case PIPE_RESOURCE_PARAM_HANDLE_TYPE_FD: {
      /* export a winsys handle of the requested flavor and return its value */
      memset(&whandle, 0, sizeof(whandle));
      if (param == PIPE_RESOURCE_PARAM_HANDLE_TYPE_SHARED)
         whandle.type = WINSYS_HANDLE_TYPE_SHARED;
      else if (param == PIPE_RESOURCE_PARAM_HANDLE_TYPE_KMS)
         whandle.type = WINSYS_HANDLE_TYPE_KMS;
      else if (param == PIPE_RESOURCE_PARAM_HANDLE_TYPE_FD)
         whandle.type = WINSYS_HANDLE_TYPE_FD;

      if (!pscreen->resource_get_handle(pscreen, pctx, pres, &whandle, handle_usage))
         return false;

      *value = whandle.handle;
      break;
   }
   }
   return true;
}
916
917
static bool
918
zink_resource_get_handle(struct pipe_screen *pscreen,
919
struct pipe_context *context,
920
struct pipe_resource *tex,
921
struct winsys_handle *whandle,
922
unsigned usage)
923
{
924
if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
925
#ifdef ZINK_USE_DMABUF
926
struct zink_resource *res = zink_resource(tex);
927
struct zink_screen *screen = zink_screen(pscreen);
928
//TODO: remove for wsi
929
struct zink_resource_object *obj = res->scanout_obj ? res->scanout_obj : res->obj;
930
931
VkMemoryGetFdInfoKHR fd_info = {0};
932
int fd;
933
fd_info.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
934
//TODO: remove for wsi
935
fd_info.memory = obj->mem;
936
fd_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
937
VkResult result = (*screen->vk.GetMemoryFdKHR)(screen->dev, &fd_info, &fd);
938
if (result != VK_SUCCESS)
939
return false;
940
whandle->handle = fd;
941
uint64_t value;
942
zink_resource_get_param(pscreen, context, tex, 0, 0, 0, PIPE_RESOURCE_PARAM_MODIFIER, 0, &value);
943
whandle->modifier = value;
944
zink_resource_get_param(pscreen, context, tex, 0, 0, 0, PIPE_RESOURCE_PARAM_OFFSET, 0, &value);
945
whandle->offset = value;
946
zink_resource_get_param(pscreen, context, tex, 0, 0, 0, PIPE_RESOURCE_PARAM_STRIDE, 0, &value);
947
whandle->stride = value;
948
#else
949
return false;
950
#endif
951
}
952
return true;
953
}
954
955
static struct pipe_resource *
956
zink_resource_from_handle(struct pipe_screen *pscreen,
957
const struct pipe_resource *templ,
958
struct winsys_handle *whandle,
959
unsigned usage)
960
{
961
#ifdef ZINK_USE_DMABUF
962
if (whandle->modifier != DRM_FORMAT_MOD_INVALID &&
963
!zink_screen(pscreen)->info.have_EXT_image_drm_format_modifier)
964
return NULL;
965
966
/* ignore any AUX planes, as well as planar formats */
967
if (templ->format == PIPE_FORMAT_NONE ||
968
util_format_get_num_planes(templ->format) != 1)
969
return NULL;
970
971
uint64_t modifier = DRM_FORMAT_MOD_INVALID;
972
int modifier_count = 0;
973
if (whandle->modifier != DRM_FORMAT_MOD_INVALID) {
974
modifier = whandle->modifier;
975
modifier_count = 1;
976
}
977
return resource_create(pscreen, templ, whandle, usage, &modifier, modifier_count);
978
#else
979
return NULL;
980
#endif
981
}
982
983
/* Replace a buffer's backing storage with a fresh allocation so a
 * PIPE_MAP_DISCARD_WHOLE_RESOURCE map doesn't have to wait on the GPU.
 * Returns true if the buffer was rebacked (it is then idle), false if
 * invalidation is not possible/needed and the caller must fall back.
 */
static bool
invalidate_buffer(struct zink_context *ctx, struct zink_resource *res)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   assert(res->base.b.target == PIPE_BUFFER);

   /* sparse buffers can't simply swap their backing object */
   if (res->base.b.flags & PIPE_RESOURCE_FLAG_SPARSE)
      return false;

   /* start > end means the valid range is empty: nothing written yet,
    * so there is nothing to invalidate */
   if (res->valid_buffer_range.start > res->valid_buffer_range.end)
      return false;

   if (res->bind_history & ZINK_RESOURCE_USAGE_STREAMOUT)
      ctx->dirty_so_targets = true;
   /* force counter buffer reset */
   res->bind_history &= ~ZINK_RESOURCE_USAGE_STREAMOUT;

   util_range_set_empty(&res->valid_buffer_range);
   /* no batch is using the buffer: it's already idle, skip the reback */
   if (!get_resource_usage(res))
      return false;

   struct zink_resource_object *old_obj = res->obj;
   struct zink_resource_object *new_obj = resource_object_create(screen, &res->base.b, NULL, NULL, NULL, 0);
   if (!new_obj) {
      debug_printf("new backing resource alloc failed!");
      return false;
   }
   /* if the old object is still referenced by in-flight batch usage, hand
    * its reference to the batch instead of dropping it here */
   bool needs_unref = true;
   if (zink_batch_usage_exists(old_obj->reads) ||
       zink_batch_usage_exists(old_obj->writes)) {
      zink_batch_reference_resource_move(&ctx->batch, res);
      needs_unref = false;
   }
   res->obj = new_obj;
   /* fresh storage: no pending accesses or barriers carry over */
   res->access_stage = 0;
   res->access = 0;
   res->unordered_barrier = false;
   /* rebind all descriptors/views that referenced the old object */
   zink_resource_rebind(ctx, res);
   zink_descriptor_set_refs_clear(&old_obj->desc_set_refs, old_obj);
   if (needs_unref)
      zink_resource_object_reference(screen, &old_obj, NULL);
   return true;
}
1027
1028
1029
static void
1030
zink_resource_invalidate(struct pipe_context *pctx, struct pipe_resource *pres)
1031
{
1032
if (pres->target == PIPE_BUFFER)
1033
invalidate_buffer(zink_context(pctx), zink_resource(pres));
1034
}
1035
1036
static void
1037
zink_transfer_copy_bufimage(struct zink_context *ctx,
1038
struct zink_resource *dst,
1039
struct zink_resource *src,
1040
struct zink_transfer *trans)
1041
{
1042
assert((trans->base.b.usage & (PIPE_MAP_DEPTH_ONLY | PIPE_MAP_STENCIL_ONLY)) !=
1043
(PIPE_MAP_DEPTH_ONLY | PIPE_MAP_STENCIL_ONLY));
1044
1045
bool buf2img = src->base.b.target == PIPE_BUFFER;
1046
1047
struct pipe_box box = trans->base.b.box;
1048
int x = box.x;
1049
if (buf2img)
1050
box.x = trans->offset;
1051
1052
if (dst->obj->transfer_dst)
1053
zink_copy_image_buffer(ctx, NULL, dst, src, trans->base.b.level, buf2img ? x : 0,
1054
box.y, box.z, trans->base.b.level, &box, trans->base.b.usage);
1055
else
1056
util_blitter_copy_texture(ctx->blitter, &dst->base.b, trans->base.b.level,
1057
x, box.y, box.z, &src->base.b,
1058
0, &box);
1059
}
1060
1061
bool
1062
zink_resource_has_usage(struct zink_resource *res, enum zink_resource_access usage)
1063
{
1064
uint32_t batch_uses = get_resource_usage(res);
1065
return batch_uses & usage;
1066
}
1067
1068
ALWAYS_INLINE static void
1069
align_offset_size(const VkDeviceSize alignment, VkDeviceSize *offset, VkDeviceSize *size, VkDeviceSize obj_size)
1070
{
1071
VkDeviceSize align = *offset % alignment;
1072
if (alignment - 1 > *offset)
1073
*offset = 0;
1074
else
1075
*offset -= align, *size += align;
1076
align = alignment - (*size % alignment);
1077
if (*offset + *size + align > obj_size)
1078
*size = obj_size - *offset;
1079
else
1080
*size += align;
1081
}
1082
1083
VkMappedMemoryRange
1084
zink_resource_init_mem_range(struct zink_screen *screen, struct zink_resource_object *obj, VkDeviceSize offset, VkDeviceSize size)
1085
{
1086
assert(obj->size);
1087
align_offset_size(screen->info.props.limits.nonCoherentAtomSize, &offset, &size, obj->size);
1088
VkMappedMemoryRange range = {
1089
VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
1090
NULL,
1091
obj->mem,
1092
offset,
1093
size
1094
};
1095
assert(range.size);
1096
return range;
1097
}
1098
1099
static void *
1100
map_resource(struct zink_screen *screen, struct zink_resource *res)
1101
{
1102
VkResult result = VK_SUCCESS;
1103
if (res->obj->map)
1104
return res->obj->map;
1105
assert(res->obj->host_visible);
1106
result = vkMapMemory(screen->dev, res->obj->mem, res->obj->offset,
1107
res->obj->size, 0, &res->obj->map);
1108
if (zink_screen_handle_vkresult(screen, result))
1109
return res->obj->map;
1110
return NULL;
1111
}
1112
1113
static void
1114
unmap_resource(struct zink_screen *screen, struct zink_resource *res)
1115
{
1116
res->obj->map = NULL;
1117
vkUnmapMemory(screen->dev, res->obj->mem);
1118
}
1119
1120
/* Map a buffer for CPU access, choosing between a direct map, an
 * unsynchronized map, or a staging-buffer copy depending on usage flags,
 * host visibility and pending GPU work. On success the mapping's base
 * pointer is returned and trans is updated (staging_res/offset/usage);
 * the caller adds box->x itself for direct maps. Returns NULL on failure.
 */
static void *
buffer_transfer_map(struct zink_context *ctx, struct zink_resource *res, unsigned usage,
                    const struct pipe_box *box, struct zink_transfer *trans)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   void *ptr = NULL;

   /* user-pointer buffers keep their map alive */
   if (res->base.is_user_ptr)
      usage |= PIPE_MAP_PERSISTENT;

   /* See if the buffer range being mapped has never been initialized,
    * in which case it can be mapped unsynchronized. */
   if (!(usage & (PIPE_MAP_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
       usage & PIPE_MAP_WRITE && !res->base.is_shared &&
       !util_ranges_intersect(&res->valid_buffer_range, box->x, box->x + box->width)) {
      usage |= PIPE_MAP_UNSYNCHRONIZED;
   }

   /* If discarding the entire range, discard the whole resource instead. */
   if (usage & PIPE_MAP_DISCARD_RANGE && box->x == 0 && box->width == res->base.b.width0) {
      usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
   }

   if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE &&
       !(usage & (PIPE_MAP_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INVALIDATE))) {
      assert(usage & PIPE_MAP_WRITE);

      if (invalidate_buffer(ctx, res)) {
         /* At this point, the buffer is always idle. */
         usage |= PIPE_MAP_UNSYNCHRONIZED;
      } else {
         /* Fall back to a temporary buffer. */
         usage |= PIPE_MAP_DISCARD_RANGE;
      }
   }

   /* write path: if a direct map would stall on the GPU, redirect the write
    * into an upload-manager staging buffer instead */
   if ((usage & PIPE_MAP_WRITE) &&
       (usage & PIPE_MAP_DISCARD_RANGE || (!(usage & PIPE_MAP_READ) && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_RW))) &&
       ((!res->obj->host_visible) || !(usage & (PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_PERSISTENT)))) {

      /* Check if mapping this buffer would cause waiting for the GPU.
       */

      if (!res->obj->host_visible ||
          !zink_batch_usage_check_completion(ctx, res->obj->reads) ||
          !zink_batch_usage_check_completion(ctx, res->obj->writes)) {
         /* Do a wait-free write-only transfer using a temporary buffer. */
         unsigned offset;

         /* If we are not called from the driver thread, we have
          * to use the uploader from u_threaded_context, which is
          * local to the calling thread.
          */
         struct u_upload_mgr *mgr;
         if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
            mgr = ctx->tc->base.stream_uploader;
         else
            mgr = ctx->base.stream_uploader;
         u_upload_alloc(mgr, 0, box->width + box->x,
                        screen->info.props.limits.minMemoryMapAlignment, &offset,
                        (struct pipe_resource **)&trans->staging_res, (void **)&ptr);
         /* from here on, 'res' is the staging buffer */
         res = zink_resource(trans->staging_res);
         trans->offset = offset;
      } else {
         /* At this point, the buffer is always idle (we checked it above). */
         usage |= PIPE_MAP_UNSYNCHRONIZED;
      }
   } else if ((usage & PIPE_MAP_READ) && !(usage & PIPE_MAP_PERSISTENT)) {
      assert(!(usage & (TC_TRANSFER_MAP_THREADED_UNSYNC | PIPE_MAP_THREAD_SAFE)));
      if (usage & PIPE_MAP_DONTBLOCK) {
         /* sparse/device-local will always need to wait since it has to copy */
         if (!res->obj->host_visible)
            return NULL;
         if (!zink_batch_usage_check_completion(ctx, res->obj->writes))
            return NULL;
      } else if (!res->obj->host_visible) {
         /* device-local read: copy through a host-visible staging buffer
          * and wait for the copy to finish */
         trans->staging_res = pipe_buffer_create(&screen->base, PIPE_BIND_LINEAR, PIPE_USAGE_STAGING, box->x + box->width);
         if (!trans->staging_res)
            return NULL;
         struct zink_resource *staging_res = zink_resource(trans->staging_res);
         zink_copy_buffer(ctx, NULL, staging_res, res, box->x, box->x, box->width);
         res = staging_res;
         zink_fence_wait(&ctx->base);
      } else
         zink_batch_usage_wait(ctx, res->obj->writes);
   }

   /* no uploader mapping was produced above: map (possibly staging) res */
   if (!ptr) {
      /* if writing to a streamout buffer, ensure synchronization next time it's used */
      if (usage & PIPE_MAP_WRITE && res->bind_history & ZINK_RESOURCE_USAGE_STREAMOUT) {
         ctx->dirty_so_targets = true;
         /* force counter buffer reset */
         res->bind_history &= ~ZINK_RESOURCE_USAGE_STREAMOUT;
      }
      ptr = map_resource(screen, res);
      if (!ptr)
         return NULL;
   }

   if (!res->obj->coherent
#if defined(MVK_VERSION)
       // Work around for MoltenVk limitation specifically on coherent memory
       // MoltenVk returns blank memory ranges when there should be data present
       // This is a known limitation of MoltenVK.
       // See https://github.com/KhronosGroup/MoltenVK/blob/master/Docs/MoltenVK_Runtime_UserGuide.md#known-moltenvk-limitations

       || screen->instance_info.have_MVK_moltenvk
#endif
      ) {
      /* non-coherent memory: invalidate the mapped range before the CPU reads */
      VkDeviceSize size = box->width;
      VkDeviceSize offset = res->obj->offset + trans->offset + box->x;
      VkMappedMemoryRange range = zink_resource_init_mem_range(screen, res->obj, offset, size);
      if (vkInvalidateMappedMemoryRanges(screen->dev, 1, &range) != VK_SUCCESS) {
         vkUnmapMemory(screen->dev, res->obj->mem);
         return NULL;
      }
   }
   /* store the (possibly augmented) usage flags for unmap/flush */
   trans->base.b.usage = usage;
   if (usage & PIPE_MAP_WRITE)
      util_range_add(&res->base.b, &res->valid_buffer_range, box->x, box->x + box->width);
   return ptr;
}
1242
1243
/* pipe_context map entry point (via u_transfer_helper) for both buffers
 * and images. Allocates a zink_transfer, maps either directly or through
 * a staging resource, and returns a pointer adjusted to the box origin.
 * Returns NULL on failure.
 *
 * NOTE(review): the early NULL returns after 'trans' is allocated (failed
 * staging creation / failed map) appear to leak 'trans' and its resource
 * reference — verify against the upstream cleanup behavior.
 */
static void *
zink_transfer_map(struct pipe_context *pctx,
                  struct pipe_resource *pres,
                  unsigned level,
                  unsigned usage,
                  const struct pipe_box *box,
                  struct pipe_transfer **transfer)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_resource *res = zink_resource(pres);

   struct zink_transfer *trans;

   /* thread-safe maps use malloc so they can be freed from any thread;
    * otherwise use the per-context slab pools */
   if (usage & PIPE_MAP_THREAD_SAFE)
      trans = malloc(sizeof(*trans));
   else if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
      trans = slab_alloc(&ctx->transfer_pool_unsync);
   else
      trans = slab_alloc(&ctx->transfer_pool);
   if (!trans)
      return NULL;

   memset(trans, 0, sizeof(*trans));
   pipe_resource_reference(&trans->base.b.resource, pres);

   /* NOTE(review): redundant — pipe_resource_reference above already set this */
   trans->base.b.resource = pres;
   trans->base.b.level = level;
   trans->base.b.usage = usage;
   trans->base.b.box = *box;

   void *ptr, *base;
   if (pres->target == PIPE_BUFFER) {
      base = buffer_transfer_map(ctx, res, usage, box, trans);
      ptr = ((uint8_t *)base) + box->x;
   } else {
      if (usage & PIPE_MAP_WRITE && !(usage & PIPE_MAP_READ))
         /* this is like a blit, so we can potentially dump some clears or maybe we have to */
         zink_fb_clears_apply_or_discard(ctx, pres, zink_rect_from_box(box), false);
      else if (usage & PIPE_MAP_READ)
         /* if the map region intersects with any clears then we have to apply them */
         zink_fb_clears_apply_region(ctx, pres, zink_rect_from_box(box));
      if (res->optimal_tiling || !res->obj->host_visible) {
         /* tiled or device-local image: map through a linear staging buffer */
         enum pipe_format format = pres->format;
         if (usage & PIPE_MAP_DEPTH_ONLY)
            format = util_format_get_depth_only(pres->format);
         else if (usage & PIPE_MAP_STENCIL_ONLY)
            format = PIPE_FORMAT_S8_UINT;
         trans->base.b.stride = util_format_get_stride(format, box->width);
         trans->base.b.layer_stride = util_format_get_2d_size(format,
                                                              trans->base.b.stride,
                                                              box->height);

         /* build a buffer template big enough for the boxed region */
         struct pipe_resource templ = *pres;
         templ.format = format;
         templ.usage = usage & PIPE_MAP_READ ? PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
         templ.target = PIPE_BUFFER;
         templ.bind = PIPE_BIND_LINEAR;
         templ.width0 = trans->base.b.layer_stride * box->depth;
         templ.height0 = templ.depth0 = 0;
         templ.last_level = 0;
         templ.array_size = 1;
         templ.flags = 0;

         trans->staging_res = zink_resource_create(pctx->screen, &templ);
         if (!trans->staging_res)
            return NULL;

         struct zink_resource *staging_res = zink_resource(trans->staging_res);

         if (usage & PIPE_MAP_READ) {
            /* force multi-context sync */
            if (zink_batch_usage_is_unflushed(res->obj->writes))
               zink_batch_usage_wait(ctx, res->obj->writes);
            zink_transfer_copy_bufimage(ctx, staging_res, res, trans);
            /* need to wait for rendering to finish */
            zink_fence_wait(pctx);
         }

         ptr = base = map_resource(screen, staging_res);
         if (!base)
            return NULL;
      } else {
         /* linear host-visible image: map it directly */
         assert(!res->optimal_tiling);
         base = map_resource(screen, res);
         if (!base)
            return NULL;
         if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_RW)) {
            if (usage & PIPE_MAP_WRITE)
               zink_fence_wait(pctx);
            else
               zink_batch_usage_wait(ctx, res->obj->writes);
         }
         VkImageSubresource isr = {
            res->obj->modifier_aspect ? res->obj->modifier_aspect : res->aspect,
            level,
            0
         };
         VkSubresourceLayout srl;
         vkGetImageSubresourceLayout(screen->dev, res->obj->image, &isr, &srl);
         trans->base.b.stride = srl.rowPitch;
         if (res->base.b.target == PIPE_TEXTURE_3D)
            trans->base.b.layer_stride = srl.depthPitch;
         else
            trans->base.b.layer_stride = srl.arrayPitch;
         trans->offset = srl.offset;
         trans->depthPitch = srl.depthPitch;
         /* compute the byte offset of the box origin within the subresource */
         const struct util_format_description *desc = util_format_description(res->base.b.format);
         unsigned offset = srl.offset +
                           box->z * srl.depthPitch +
                           (box->y / desc->block.height) * srl.rowPitch +
                           (box->x / desc->block.width) * (desc->block.bits / 8);
         if (!res->obj->coherent) {
            VkDeviceSize size = box->width * box->height * desc->block.bits / 8;
            VkMappedMemoryRange range = zink_resource_init_mem_range(screen, res->obj, res->obj->offset + offset, size);
            vkFlushMappedMemoryRanges(screen->dev, 1, &range);
         }
         ptr = ((uint8_t *)base) + offset;
         /* 32-bit address space is scarce: mark the map temporary so it is
          * torn down on unmap instead of cached */
         if (sizeof(void*) == 4)
            trans->base.b.usage |= ZINK_MAP_TEMPORARY;
      }
   }
   if ((usage & PIPE_MAP_PERSISTENT) && !(usage & PIPE_MAP_COHERENT))
      res->obj->persistent_maps++;

   /* track map counts so unmap can know when the last one goes away */
   if (trans->base.b.usage & (PIPE_MAP_ONCE | ZINK_MAP_TEMPORARY))
      p_atomic_inc(&res->obj->map_count);

   *transfer = &trans->base.b;
   return ptr;
}
1374
1375
/* Flush a written sub-box of a mapped transfer: flush CPU writes out of
 * non-coherent memory, then (for staging transfers) copy the staging data
 * back into the real resource. No-op for read-only transfers.
 */
static void
zink_transfer_flush_region(struct pipe_context *pctx,
                           struct pipe_transfer *ptrans,
                           const struct pipe_box *box)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_resource *res = zink_resource(ptrans->resource);
   struct zink_transfer *trans = (struct zink_transfer *)ptrans;

   if (trans->base.b.usage & PIPE_MAP_WRITE) {
      struct zink_screen *screen = zink_screen(pctx->screen);
      /* 'm' is whichever object the CPU actually wrote: staging if present */
      struct zink_resource *m = trans->staging_res ? zink_resource(trans->staging_res) :
                                                     res;
      ASSERTED VkDeviceSize size, offset;
      if (m->obj->is_buffer) {
         size = box->width;
         offset = trans->offset + box->x;
      } else {
         /* image: compute the byte extent of the written box from the
          * strides recorded at map time */
         size = box->width * box->height * util_format_get_blocksize(m->base.b.format);
         offset = trans->offset +
                  box->z * trans->depthPitch +
                  util_format_get_2d_size(m->base.b.format, trans->base.b.stride, box->y) +
                  util_format_get_stride(m->base.b.format, box->x);
         assert(offset + size <= res->obj->size);
      }
      if (!m->obj->coherent) {
         /* non-coherent memory: make CPU writes visible to the device
          * (the whole object is flushed, not just the box) */
         VkMappedMemoryRange range = zink_resource_init_mem_range(screen, m->obj, m->obj->offset, m->obj->size);
         vkFlushMappedMemoryRanges(screen->dev, 1, &range);
      }
      if (trans->staging_res) {
         struct zink_resource *staging_res = zink_resource(trans->staging_res);

         if (ptrans->resource->target == PIPE_BUFFER)
            zink_copy_buffer(ctx, NULL, res, staging_res, box->x, offset, box->width);
         else
            zink_transfer_copy_bufimage(ctx, res, staging_res, trans);
      }
   }
}
1414
1415
/* Unmap counterpart of zink_transfer_map: implicitly flushes unless the
 * map was FLUSH_EXPLICIT/COHERENT, tears down temporary mappings, drops
 * the staging resource and the transfer's resource reference, and frees
 * the transfer from the pool/heap it was allocated from.
 */
static void
zink_transfer_unmap(struct pipe_context *pctx,
                    struct pipe_transfer *ptrans)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_resource *res = zink_resource(ptrans->resource);
   struct zink_transfer *trans = (struct zink_transfer *)ptrans;

   /* implicit flush of the whole mapped box unless the caller flushes
    * explicitly or the map is coherent */
   if (!(trans->base.b.usage & (PIPE_MAP_FLUSH_EXPLICIT | PIPE_MAP_COHERENT))) {
      zink_transfer_flush_region(pctx, ptrans, &ptrans->box);
   }

   /* unmap when this was a one-shot direct map, or when the last
    * ZINK_MAP_TEMPORARY mapping is going away */
   if ((trans->base.b.usage & PIPE_MAP_ONCE && !trans->staging_res && !screen->threaded) ||
       (trans->base.b.usage & ZINK_MAP_TEMPORARY && !p_atomic_dec_return(&res->obj->map_count)))
      unmap_resource(screen, res);
   if ((trans->base.b.usage & PIPE_MAP_PERSISTENT) && !(trans->base.b.usage & PIPE_MAP_COHERENT))
      res->obj->persistent_maps--;

   if (trans->staging_res)
      pipe_resource_reference(&trans->staging_res, NULL);
   pipe_resource_reference(&trans->base.b.resource, NULL);

   /* free from the same allocator zink_transfer_map used */
   if (trans->base.b.usage & PIPE_MAP_THREAD_SAFE) {
      free(trans);
   } else {
      /* Don't use pool_transfers_unsync. We are always in the driver
       * thread. Freeing an object into a different pool is allowed.
       */
      slab_free(&ctx->transfer_pool, ptrans);
   }
}
1447
1448
static void
1449
zink_buffer_subdata(struct pipe_context *ctx, struct pipe_resource *buffer,
1450
unsigned usage, unsigned offset, unsigned size, const void *data)
1451
{
1452
struct pipe_transfer *transfer = NULL;
1453
struct pipe_box box;
1454
uint8_t *map = NULL;
1455
1456
usage |= PIPE_MAP_WRITE | PIPE_MAP_ONCE;
1457
1458
if (!(usage & PIPE_MAP_DIRECTLY))
1459
usage |= PIPE_MAP_DISCARD_RANGE;
1460
1461
u_box_1d(offset, size, &box);
1462
map = zink_transfer_map(ctx, buffer, 0, usage, &box, &transfer);
1463
if (!map)
1464
return;
1465
1466
memcpy(map, data, size);
1467
zink_transfer_unmap(ctx, transfer);
1468
}
1469
1470
static struct pipe_resource *
1471
zink_resource_get_separate_stencil(struct pipe_resource *pres)
1472
{
1473
/* For packed depth-stencil, we treat depth as the primary resource
1474
* and store S8 as the "second plane" resource.
1475
*/
1476
if (pres->next && pres->next->format == PIPE_FORMAT_S8_UINT)
1477
return pres->next;
1478
1479
return NULL;
1480
1481
}
1482
1483
/* Ensure a resource can be bound as a shader image (storage usage).
 * Buffers get a second VkBuffer created with storage usage over the same
 * memory; images are rebacked with a new object created with
 * PIPE_BIND_SHADER_IMAGE and their contents copied over level by level.
 * Returns false if the new buffer/object cannot be created.
 */
bool
zink_resource_object_init_storage(struct zink_context *ctx, struct zink_resource *res)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   /* base resource already has the cap */
   if (res->base.b.bind & PIPE_BIND_SHADER_IMAGE)
      return true;
   if (res->obj->is_buffer) {
      /* already created a storage-capable alias buffer */
      if (res->obj->sbuffer)
         return true;
      /* alias the same memory with a buffer that adds storage usage */
      VkBufferCreateInfo bci = create_bci(screen, &res->base.b, res->base.b.bind | PIPE_BIND_SHADER_IMAGE);
      bci.size = res->obj->size;

      VkBuffer buffer;
      if (vkCreateBuffer(screen->dev, &bci, NULL, &buffer) != VK_SUCCESS)
         return false;
      vkBindBufferMemory(screen->dev, buffer, res->obj->mem, res->obj->offset);
      /* keep the old buffer in sbuffer; the storage-capable one becomes primary */
      res->obj->sbuffer = res->obj->buffer;
      res->obj->buffer = buffer;
   } else {
      /* flush pending clears and get the image copyable before rebacking */
      zink_fb_clears_apply_region(ctx, &res->base.b, (struct u_rect){0, res->base.b.width0, 0, res->base.b.height0});
      zink_resource_image_barrier(ctx, NULL, res, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, 0, 0);
      res->base.b.bind |= PIPE_BIND_SHADER_IMAGE;
      struct zink_resource_object *old_obj = res->obj;
      struct zink_resource_object *new_obj = resource_object_create(screen, &res->base.b, NULL, &res->optimal_tiling, res->modifiers, res->modifiers_count);
      if (!new_obj) {
         debug_printf("new backing resource alloc failed!");
         /* roll back the bind flag change on failure */
         res->base.b.bind &= ~PIPE_BIND_SHADER_IMAGE;
         return false;
      }
      /* shallow copy wrapping the old object so it can act as copy source */
      struct zink_resource staging = *res;
      staging.obj = old_obj;
      /* if the old object is still in use, hand its ref to the batch */
      bool needs_unref = true;
      if (get_resource_usage(res)) {
         zink_batch_reference_resource_move(&ctx->batch, res);
         needs_unref = false;
      }
      res->obj = new_obj;
      zink_descriptor_set_refs_clear(&old_obj->desc_set_refs, old_obj);
      /* copy every mip level (all layers/depth) from old to new backing */
      for (unsigned i = 0; i <= res->base.b.last_level; i++) {
         struct pipe_box box = {0, 0, 0,
                                u_minify(res->base.b.width0, i),
                                u_minify(res->base.b.height0, i), res->base.b.array_size};
         box.depth = util_num_layers(&res->base.b, i);
         ctx->base.resource_copy_region(&ctx->base, &res->base.b, i, 0, 0, 0, &staging.base.b, i, &box);
      }
      if (needs_unref)
         zink_resource_object_reference(screen, &old_obj, NULL);
   }

   /* rebind descriptors/views to the new buffer/object */
   zink_resource_rebind(ctx, res);

   return true;
}
1537
1538
void
1539
zink_resource_setup_transfer_layouts(struct zink_context *ctx, struct zink_resource *src, struct zink_resource *dst)
1540
{
1541
if (src == dst) {
1542
/* The Vulkan 1.1 specification says the following about valid usage
1543
* of vkCmdBlitImage:
1544
*
1545
* "srcImageLayout must be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR,
1546
* VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL or VK_IMAGE_LAYOUT_GENERAL"
1547
*
1548
* and:
1549
*
1550
* "dstImageLayout must be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR,
1551
* VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL or VK_IMAGE_LAYOUT_GENERAL"
1552
*
1553
* Since we cant have the same image in two states at the same time,
1554
* we're effectively left with VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
1555
* VK_IMAGE_LAYOUT_GENERAL. And since this isn't a present-related
1556
* operation, VK_IMAGE_LAYOUT_GENERAL seems most appropriate.
1557
*/
1558
zink_resource_image_barrier(ctx, NULL, src,
1559
VK_IMAGE_LAYOUT_GENERAL,
1560
VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
1561
VK_PIPELINE_STAGE_TRANSFER_BIT);
1562
} else {
1563
zink_resource_image_barrier(ctx, NULL, src,
1564
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1565
VK_ACCESS_TRANSFER_READ_BIT,
1566
VK_PIPELINE_STAGE_TRANSFER_BIT);
1567
1568
zink_resource_image_barrier(ctx, NULL, dst,
1569
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1570
VK_ACCESS_TRANSFER_WRITE_BIT,
1571
VK_PIPELINE_STAGE_TRANSFER_BIT);
1572
}
1573
}
1574
1575
void
1576
zink_get_depth_stencil_resources(struct pipe_resource *res,
1577
struct zink_resource **out_z,
1578
struct zink_resource **out_s)
1579
{
1580
if (!res) {
1581
if (out_z) *out_z = NULL;
1582
if (out_s) *out_s = NULL;
1583
return;
1584
}
1585
1586
if (res->format != PIPE_FORMAT_S8_UINT) {
1587
if (out_z) *out_z = zink_resource(res);
1588
if (out_s) *out_s = zink_resource(zink_resource_get_separate_stencil(res));
1589
} else {
1590
if (out_z) *out_z = NULL;
1591
if (out_s) *out_s = zink_resource(res);
1592
}
1593
}
1594
1595
static void
1596
zink_resource_set_separate_stencil(struct pipe_resource *pres,
1597
struct pipe_resource *stencil)
1598
{
1599
assert(util_format_has_depth(util_format_description(pres->format)));
1600
pipe_resource_reference(&pres->next, stencil);
1601
}
1602
1603
static enum pipe_format
1604
zink_resource_get_internal_format(struct pipe_resource *pres)
1605
{
1606
struct zink_resource *res = zink_resource(pres);
1607
return res->internal_format;
1608
}
1609
1610
/* Callback table handed to u_transfer_helper so it can interleave/split
 * depth-stencil and handle format fallbacks on top of zink's raw
 * create/destroy/map/unmap/flush implementations. */
static const struct u_transfer_vtbl transfer_vtbl = {
   .resource_create       = zink_resource_create,
   .resource_destroy      = zink_resource_destroy,
   .transfer_map          = zink_transfer_map,
   .transfer_unmap        = zink_transfer_unmap,
   .transfer_flush_region = zink_transfer_flush_region,
   .get_internal_format   = zink_resource_get_internal_format,
   .set_stencil           = zink_resource_set_separate_stencil,
   .get_stencil           = zink_resource_get_separate_stencil,
};
1620
1621
bool
1622
zink_screen_resource_init(struct pipe_screen *pscreen)
1623
{
1624
struct zink_screen *screen = zink_screen(pscreen);
1625
pscreen->resource_create = zink_resource_create;
1626
pscreen->resource_create_with_modifiers = zink_resource_create_with_modifiers;
1627
pscreen->resource_destroy = zink_resource_destroy;
1628
pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl, true, true, false, false);
1629
1630
if (screen->info.have_KHR_external_memory_fd) {
1631
pscreen->resource_get_handle = zink_resource_get_handle;
1632
pscreen->resource_from_handle = zink_resource_from_handle;
1633
}
1634
pscreen->resource_get_param = zink_resource_get_param;
1635
simple_mtx_init(&screen->mem_cache_mtx, mtx_plain);
1636
screen->resource_mem_cache = _mesa_hash_table_create(NULL, mem_hash, mem_equals);
1637
return !!screen->resource_mem_cache;
1638
}
1639
1640
void
1641
zink_context_resource_init(struct pipe_context *pctx)
1642
{
1643
pctx->buffer_map = u_transfer_helper_deinterleave_transfer_map;
1644
pctx->buffer_unmap = u_transfer_helper_deinterleave_transfer_unmap;
1645
pctx->texture_map = u_transfer_helper_deinterleave_transfer_map;
1646
pctx->texture_unmap = u_transfer_helper_deinterleave_transfer_unmap;
1647
1648
pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
1649
pctx->buffer_subdata = zink_buffer_subdata;
1650
pctx->texture_subdata = u_default_texture_subdata;
1651
pctx->invalidate_resource = zink_resource_invalidate;
1652
}
1653
1654