GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/virgl/virgl_resource.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "virgl_context.h"
#include "virgl_resource.h"
#include "virgl_screen.h"
#include "virgl_staging_mgr.h"

/* A (soft) limit for the amount of memory we want to allow for queued staging
 * resources. This is used to decide when we should force a flush, in order to
 * avoid exhausting virtio-gpu memory.
 */
#define VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT (128 * 1024 * 1024)

enum virgl_transfer_map_type {
   VIRGL_TRANSFER_MAP_ERROR = -1,
   VIRGL_TRANSFER_MAP_HW_RES,

   /* Map a range of a staging buffer. The updated contents should be transferred
    * with a copy transfer.
    */
   VIRGL_TRANSFER_MAP_STAGING,

   /* Reallocate the underlying virgl_hw_res. */
   VIRGL_TRANSFER_MAP_REALLOC,
};

/* We need to flush to properly sync the transfer with the current cmdbuf.
 * But there are cases where the flushing can be skipped:
 *
 * - synchronization is disabled
 * - the resource is not referenced by the current cmdbuf
 */
static bool virgl_res_needs_flush(struct virgl_context *vctx,
                                  struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res = virgl_resource(trans->base.resource);

   if (trans->base.usage & PIPE_MAP_UNSYNCHRONIZED)
      return false;

   if (!vws->res_is_referenced(vws, vctx->cbuf, res->hw_res))
      return false;

   return true;
}

/* We need to read back from the host storage to make sure the guest storage
 * is up-to-date. But there are cases where the readback can be skipped:
 *
 * - the content can be discarded
 * - the host storage is read-only
 *
 * Note that PIPE_MAP_WRITE without discard bits requires readback.
 * PIPE_MAP_READ becomes irrelevant. PIPE_MAP_UNSYNCHRONIZED and
 * PIPE_MAP_FLUSH_EXPLICIT are also irrelevant.
 */
static bool virgl_res_needs_readback(struct virgl_context *vctx,
                                     struct virgl_resource *res,
                                     unsigned usage, unsigned level)
{
   if (usage & (PIPE_MAP_DISCARD_RANGE |
                PIPE_MAP_DISCARD_WHOLE_RESOURCE))
      return false;

   if (res->clean_mask & (1 << level))
      return false;

   return true;
}

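/* Decide how a transfer should be serviced: map the existing hw_res, map a
 * staging buffer, or reallocate the hw_res, and perform the flush, readback
 * and wait that the chosen strategy requires.  Returns
 * VIRGL_TRANSFER_MAP_ERROR when the transfer cannot proceed.
 */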
static enum virgl_transfer_map_type
virgl_resource_transfer_prepare(struct virgl_context *vctx,
                                struct virgl_transfer *xfer)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
   struct virgl_winsys *vws = vs->vws;
   struct virgl_resource *res = virgl_resource(xfer->base.resource);
   enum virgl_transfer_map_type map_type = VIRGL_TRANSFER_MAP_HW_RES;
   bool flush;
   bool readback;
   bool wait;

   /* there is no way to map the host storage currently */
   if (xfer->base.usage & PIPE_MAP_DIRECTLY)
      return VIRGL_TRANSFER_MAP_ERROR;

   /* We break the logic down into four steps
    *
    * step 1: determine the required operations independently
    * step 2: look for chances to skip the operations
    * step 3: resolve dependencies between the operations
    * step 4: execute the operations
    */

   flush = virgl_res_needs_flush(vctx, xfer);
   readback = virgl_res_needs_readback(vctx, res, xfer->base.usage,
                                       xfer->base.level);
   /* We need to wait for all cmdbufs, current or previous, that access the
    * resource to finish unless synchronization is disabled.
    */
   wait = !(xfer->base.usage & PIPE_MAP_UNSYNCHRONIZED);

   /* When the transfer range consists of only uninitialized data, we can
    * assume the GPU is not accessing the range and readback is unnecessary.
    * We can proceed as if PIPE_MAP_UNSYNCHRONIZED and
    * PIPE_MAP_DISCARD_RANGE are set.
    */
   if (res->b.target == PIPE_BUFFER &&
       !util_ranges_intersect(&res->valid_buffer_range, xfer->base.box.x,
                              xfer->base.box.x + xfer->base.box.width) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER))) {
      flush = false;
      readback = false;
      wait = false;
   }

   /* When the resource is busy but its content can be discarded, we can
    * replace its HW resource or use a staging buffer to avoid waiting.
    */
   if (wait &&
       (xfer->base.usage & (PIPE_MAP_DISCARD_RANGE |
                            PIPE_MAP_DISCARD_WHOLE_RESOURCE)) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER))) {
      bool can_realloc = false;
      bool can_staging = false;

      /* A PIPE_MAP_DISCARD_WHOLE_RESOURCE transfer may be followed by
       * PIPE_MAP_UNSYNCHRONIZED transfers to non-overlapping regions.
       * It cannot be treated as a PIPE_MAP_DISCARD_RANGE transfer,
       * otherwise those following unsynchronized transfers may overwrite
       * valid data.
       */
      if (xfer->base.usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
         can_realloc = virgl_can_rebind_resource(vctx, &res->b);
      } else {
         can_staging = vctx->supports_staging;
      }

      /* discard implies no readback */
      assert(!readback);

      if (can_realloc || can_staging) {
         /* Both map types have some costs. Do them only when the resource is
          * (or will be) busy for real. Otherwise, set wait to false.
          */
         wait = (flush || vws->resource_is_busy(vws, res->hw_res));
         if (wait) {
            map_type = (can_realloc) ?
               VIRGL_TRANSFER_MAP_REALLOC :
               VIRGL_TRANSFER_MAP_STAGING;
            wait = false;

            /* There is normally no need to flush either, unless the amount of
             * memory we are using for staging resources starts growing, in
             * which case we want to flush to keep our memory consumption in
             * check.
             */
            flush = (vctx->queued_staging_res_size >
                     VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT);
         }
      }
   }

   /* readback has some implications */
   if (readback) {
      /* Readback is yet another command and is transparent to the state
       * trackers. It should be waited for in all cases, including when
       * PIPE_MAP_UNSYNCHRONIZED is set.
       */
      wait = true;

      /* When the transfer queue has pending writes to this transfer's region,
       * we have to flush before readback.
       */
      if (!flush && virgl_transfer_queue_is_queued(&vctx->queue, xfer))
         flush = true;
   }

   if (flush)
      vctx->base.flush(&vctx->base, NULL, 0);

   /* If we are not allowed to block, and we know that we will have to wait,
    * either because the resource is busy, or because it will become busy due
    * to a readback, return early to avoid performing an incomplete
    * transfer_get. Such an incomplete transfer_get may finish at any time,
    * during which another unsynchronized map could write to the resource
    * contents, leaving the contents in an undefined state.
    */
   if ((xfer->base.usage & PIPE_MAP_DONTBLOCK) &&
       (readback || (wait && vws->resource_is_busy(vws, res->hw_res))))
      return VIRGL_TRANSFER_MAP_ERROR;

   if (readback) {
      vws->transfer_get(vws, res->hw_res, &xfer->base.box, xfer->base.stride,
                        xfer->l_stride, xfer->offset, xfer->base.level);
   }

   if (wait)
      vws->resource_wait(vws, res->hw_res);

   return map_type;
}

/* Calculate the minimum size of the memory required to service a resource
 * transfer map. Also return the stride and layer_stride for the corresponding
 * layout.
 */
static unsigned
virgl_transfer_map_size(struct virgl_transfer *vtransfer,
                        unsigned *out_stride,
                        unsigned *out_layer_stride)
{
   struct pipe_resource *pres = vtransfer->base.resource;
   struct pipe_box *box = &vtransfer->base.box;
   unsigned stride;
   unsigned layer_stride;
   unsigned size;

   assert(out_stride);
   assert(out_layer_stride);

   stride = util_format_get_stride(pres->format, box->width);
   layer_stride = util_format_get_2d_size(pres->format, stride, box->height);

   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      size = box->depth * layer_stride;
   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      size = box->depth * stride;
   } else {
      size = layer_stride;
   }

   *out_stride = stride;
   *out_layer_stride = layer_stride;

   return size;
}

/* Maps a region from staging to service the transfer. */
static void *
virgl_staging_map(struct virgl_context *vctx,
                  struct virgl_transfer *vtransfer)
{
   struct virgl_resource *vres = virgl_resource(vtransfer->base.resource);
   unsigned size;
   unsigned align_offset;
   unsigned stride;
   unsigned layer_stride;
   void *map_addr;
   bool alloc_succeeded;

   assert(vctx->supports_staging);

   size = virgl_transfer_map_size(vtransfer, &stride, &layer_stride);

   /* For buffers we need to ensure that the start of the buffer would be
    * aligned to VIRGL_MAP_BUFFER_ALIGNMENT, even if our transfer doesn't
    * actually include it. To achieve this we may need to allocate a slightly
    * larger range from the upload buffer, and later update the uploader
    * resource offset and map address to point to the requested x coordinate
    * within that range.
    *
    * 0       A       2A      3A
    * |-------|---bbbb|bbbbb--|
    *             |--------|      ==> size
    *         |---|               ==> align_offset
    *         |------------|      ==> allocation of size + align_offset
    */
   align_offset = vres->b.target == PIPE_BUFFER ?
                  vtransfer->base.box.x % VIRGL_MAP_BUFFER_ALIGNMENT :
                  0;

   alloc_succeeded =
      virgl_staging_alloc(&vctx->staging, size + align_offset,
                          VIRGL_MAP_BUFFER_ALIGNMENT,
                          &vtransfer->copy_src_offset,
                          &vtransfer->copy_src_hw_res,
                          &map_addr);
   if (alloc_succeeded) {
      /* Update source offset and address to point to the requested x coordinate
       * if we have an align_offset (see above for more information). */
      vtransfer->copy_src_offset += align_offset;
      map_addr += align_offset;

      /* Mark as dirty, since we are updating the host side resource
       * without going through the corresponding guest side resource, and
       * hence the two will diverge.
       */
      virgl_resource_dirty(vres, vtransfer->base.level);

      /* We are using the minimum required size to hold the contents,
       * possibly using a layout different from the layout of the resource,
       * so update the transfer strides accordingly.
       */
      vtransfer->base.stride = stride;
      vtransfer->base.layer_stride = layer_stride;

      /* Track the total size of active staging resources. */
      vctx->queued_staging_res_size += size + align_offset;
   }

   return map_addr;
}

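/* Drop the current hw_res and create a fresh one from the resource's own
 * template, so that a whole-resource-discard map does not have to wait for
 * the old storage to become idle.  The new storage is rebound to all of its
 * uses in the context.
 */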
static bool
virgl_resource_realloc(struct virgl_context *vctx, struct virgl_resource *res)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
   const struct pipe_resource *templ = &res->b;
   unsigned vbind, vflags;
   struct virgl_hw_res *hw_res;

   vbind = pipe_to_virgl_bind(vs, templ->bind);
   vflags = pipe_to_virgl_flags(vs, templ->flags);
   hw_res = vs->vws->resource_create(vs->vws,
                                     templ->target,
                                     templ->format,
                                     vbind,
                                     templ->width0,
                                     templ->height0,
                                     templ->depth0,
                                     templ->array_size,
                                     templ->last_level,
                                     templ->nr_samples,
                                     vflags,
                                     res->metadata.total_size);
   if (!hw_res)
      return false;

   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
   res->hw_res = hw_res;

   /* We can safely clear the range here, since it will be repopulated in the
    * following rebind operation, according to the active buffer binds.
    */
   util_range_set_empty(&res->valid_buffer_range);

   /* count toward the staging resource size limit */
   vctx->queued_staging_res_size += res->metadata.total_size;

   virgl_rebind_resource(vctx, &res->b);

   return true;
}

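/* Generic transfer map entry point, installed as pipe_context::buffer_map in
 * virgl_init_context_resource_functions().  Transfer preparation picks one
 * of the virgl_transfer_map_type strategies above; on failure the transfer
 * is destroyed and NULL is returned.
 */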
void *
virgl_resource_transfer_map(struct pipe_context *ctx,
                            struct pipe_resource *resource,
                            unsigned level,
                            unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **transfer)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_winsys *vws = virgl_screen(ctx->screen)->vws;
   struct virgl_resource *vres = virgl_resource(resource);
   struct virgl_transfer *trans;
   enum virgl_transfer_map_type map_type;
   void *map_addr;

   /* Multisampled resources require resolve before mapping. */
   assert(resource->nr_samples <= 1);

   trans = virgl_resource_create_transfer(vctx, resource,
                                          &vres->metadata, level, usage, box);

   map_type = virgl_resource_transfer_prepare(vctx, trans);
   switch (map_type) {
   case VIRGL_TRANSFER_MAP_REALLOC:
      if (!virgl_resource_realloc(vctx, vres)) {
         map_addr = NULL;
         break;
      }
      vws->resource_reference(vws, &trans->hw_res, vres->hw_res);
      FALLTHROUGH;
   case VIRGL_TRANSFER_MAP_HW_RES:
      trans->hw_res_map = vws->resource_map(vws, vres->hw_res);
      if (trans->hw_res_map)
         map_addr = trans->hw_res_map + trans->offset;
      else
         map_addr = NULL;
      break;
   case VIRGL_TRANSFER_MAP_STAGING:
      map_addr = virgl_staging_map(vctx, trans);
      /* Copy transfers don't make use of hw_res_map at the moment. */
      trans->hw_res_map = NULL;
      break;
   case VIRGL_TRANSFER_MAP_ERROR:
   default:
      trans->hw_res_map = NULL;
      map_addr = NULL;
      break;
   }

   if (!map_addr) {
      virgl_resource_destroy_transfer(vctx, trans);
      return NULL;
   }

   if (vres->b.target == PIPE_BUFFER) {
      /* For the checks below to be able to use 'usage', we assume that
       * transfer preparation doesn't affect the usage.
       */
      assert(usage == trans->base.usage);

      /* If we are doing a whole resource discard with a hw_res map, the buffer
       * storage can now be considered unused and we don't care about previous
       * contents. We can thus mark the storage as uninitialized, but only if
       * the buffer is not host writable (in which case we can't clear the
       * valid range, since that would result in missed readbacks in future
       * transfers). We only do this for VIRGL_TRANSFER_MAP_HW_RES, since for
       * VIRGL_TRANSFER_MAP_REALLOC we already take care of the buffer range
       * when reallocating and rebinding, and VIRGL_TRANSFER_MAP_STAGING is not
       * currently used for whole resource discards.
       */
      if (map_type == VIRGL_TRANSFER_MAP_HW_RES &&
          (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) &&
          (vres->clean_mask & 1)) {
         util_range_set_empty(&vres->valid_buffer_range);
      }

      if (usage & PIPE_MAP_WRITE)
         util_range_add(&vres->b, &vres->valid_buffer_range, box->x, box->x + box->width);
   }

   *transfer = &trans->base;
   return map_addr;
}

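/* Compute the per-level strides and offsets, and the total guest storage
 * size, for a resource.  A non-zero winsys_stride (e.g. from an imported
 * handle) overrides the stride derived from the format and width.
 */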
static void virgl_resource_layout(struct pipe_resource *pt,
                                  struct virgl_resource_metadata *metadata,
                                  uint32_t plane,
                                  uint32_t winsys_stride,
                                  uint32_t plane_offset,
                                  uint64_t modifier)
{
   unsigned level, nblocksy;
   unsigned width = pt->width0;
   unsigned height = pt->height0;
   unsigned depth = pt->depth0;
   unsigned buffer_size = 0;

   for (level = 0; level <= pt->last_level; level++) {
      unsigned slices;

      if (pt->target == PIPE_TEXTURE_CUBE)
         slices = 6;
      else if (pt->target == PIPE_TEXTURE_3D)
         slices = depth;
      else
         slices = pt->array_size;

      nblocksy = util_format_get_nblocksy(pt->format, height);
      metadata->stride[level] = winsys_stride ? winsys_stride :
                                util_format_get_stride(pt->format, width);
      metadata->layer_stride[level] = nblocksy * metadata->stride[level];
      metadata->level_offset[level] = buffer_size;

      buffer_size += slices * metadata->layer_stride[level];

      width = u_minify(width, 1);
      height = u_minify(height, 1);
      depth = u_minify(depth, 1);
   }

   metadata->plane = plane;
   metadata->plane_offset = plane_offset;
   metadata->modifier = modifier;
   if (pt->nr_samples <= 1)
      metadata->total_size = buffer_size;
   else /* don't create guest backing store for MSAA */
      metadata->total_size = 0;
}

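/* pipe_screen::resource_create handler: fills in the virgl_resource wrapper,
 * computes the guest layout and creates the backing hw_res.
 */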
static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
                                                   const struct pipe_resource *templ)
{
   unsigned vbind, vflags;
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);

   res->b = *templ;
   res->b.screen = &vs->base;
   pipe_reference_init(&res->b.reference, 1);
   vbind = pipe_to_virgl_bind(vs, templ->bind);
   vflags = pipe_to_virgl_flags(vs, templ->flags);
   virgl_resource_layout(&res->b, &res->metadata, 0, 0, 0, 0);

   if ((vs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT) &&
       vs->tweak_gles_emulate_bgra &&
       (templ->format == PIPE_FORMAT_B8G8R8A8_SRGB ||
        templ->format == PIPE_FORMAT_B8G8R8A8_UNORM ||
        templ->format == PIPE_FORMAT_B8G8R8X8_SRGB ||
        templ->format == PIPE_FORMAT_B8G8R8X8_UNORM)) {
      vbind |= VIRGL_BIND_PREFER_EMULATED_BGRA;
   }

   res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
                                          templ->format, vbind,
                                          templ->width0,
                                          templ->height0,
                                          templ->depth0,
                                          templ->array_size,
                                          templ->last_level,
                                          templ->nr_samples,
                                          vflags,
                                          res->metadata.total_size);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   res->clean_mask = (1 << VR_MAX_TEXTURE_2D_LEVELS) - 1;

   if (templ->target == PIPE_BUFFER) {
      util_range_init(&res->valid_buffer_range);
      virgl_buffer_init(res);
   } else {
      virgl_texture_init(res);
   }

   return &res->b;
}

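/* pipe_screen::resource_from_handle handler: wraps an imported winsys
 * handle.  Only blob resources keep the stride/offset/modifier reported by
 * the winsys; blob resources may additionally have a type assigned here in
 * case they were created untyped.
 */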
static struct pipe_resource *virgl_resource_from_handle(struct pipe_screen *screen,
                                                        const struct pipe_resource *templ,
                                                        struct winsys_handle *whandle,
                                                        unsigned usage)
{
   uint32_t winsys_stride, plane_offset, plane;
   uint64_t modifier;
   struct virgl_screen *vs = virgl_screen(screen);
   if (templ->target == PIPE_BUFFER)
      return NULL;

   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
   res->b = *templ;
   res->b.screen = &vs->base;
   pipe_reference_init(&res->b.reference, 1);

   plane = winsys_stride = plane_offset = modifier = 0;
   res->hw_res = vs->vws->resource_create_from_handle(vs->vws, whandle,
                                                      &plane,
                                                      &winsys_stride,
                                                      &plane_offset,
                                                      &modifier,
                                                      &res->blob_mem);

   /* do not use winsys returns for guest storage info of classic resource */
   if (!res->blob_mem) {
      winsys_stride = 0;
      plane_offset = 0;
      modifier = 0;
   }

   virgl_resource_layout(&res->b, &res->metadata, plane, winsys_stride,
                         plane_offset, modifier);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   /* assign blob resource a type in case it was created untyped */
   if (res->blob_mem && plane == 0 &&
       (vs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_UNTYPED_RESOURCE)) {
      uint32_t plane_strides[VIRGL_MAX_PLANE_COUNT];
      uint32_t plane_offsets[VIRGL_MAX_PLANE_COUNT];
      uint32_t plane_count = 0;
      struct pipe_resource *iter = &res->b;

      do {
         struct virgl_resource *plane = virgl_resource(iter);

         /* must be a plain 2D texture sharing the same hw_res */
         if (plane->b.target != PIPE_TEXTURE_2D ||
             plane->b.depth0 != 1 ||
             plane->b.array_size != 1 ||
             plane->b.last_level != 0 ||
             plane->b.nr_samples > 1 ||
             plane->hw_res != res->hw_res ||
             plane_count >= VIRGL_MAX_PLANE_COUNT) {
            vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
            FREE(res);
            return NULL;
         }

         plane_strides[plane_count] = plane->metadata.stride[0];
         plane_offsets[plane_count] = plane->metadata.plane_offset;
         plane_count++;
         iter = iter->next;
      } while (iter);

      vs->vws->resource_set_type(vs->vws,
                                 res->hw_res,
                                 pipe_to_virgl_format(res->b.format),
                                 pipe_to_virgl_bind(vs, res->b.bind),
                                 res->b.width0,
                                 res->b.height0,
                                 usage,
                                 res->metadata.modifier,
                                 plane_count,
                                 plane_strides,
                                 plane_offsets);
   }

   virgl_texture_init(res);

   return &res->b;
}

void virgl_init_screen_resource_functions(struct pipe_screen *screen)
{
   screen->resource_create = virgl_resource_create;
   screen->resource_from_handle = virgl_resource_from_handle;
   screen->resource_get_handle = virgl_resource_get_handle;
   screen->resource_destroy = virgl_resource_destroy;
}

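/* Fast path for buffer uploads: when the destination range is still
 * uninitialized, try to append the data through
 * virgl_transfer_queue_extend_buffer and fall back to
 * u_default_buffer_subdata otherwise.
 */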
static void virgl_buffer_subdata(struct pipe_context *pipe,
                                 struct pipe_resource *resource,
                                 unsigned usage, unsigned offset,
                                 unsigned size, const void *data)
{
   struct virgl_context *vctx = virgl_context(pipe);
   struct virgl_resource *vbuf = virgl_resource(resource);

   /* We can try virgl_transfer_queue_extend_buffer when there is no
    * flush/readback/wait required. Based on virgl_resource_transfer_prepare,
    * the simplest way to make sure that is the case is to check the valid
    * buffer range.
    */
   if (!util_ranges_intersect(&vbuf->valid_buffer_range,
                              offset, offset + size) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER)) &&
       virgl_transfer_queue_extend_buffer(&vctx->queue,
                                          vbuf->hw_res, offset, size, data)) {
      util_range_add(&vbuf->b, &vbuf->valid_buffer_range, offset, offset + size);
      return;
   }

   u_default_buffer_subdata(pipe, resource, usage, offset, size, data);
}

void virgl_init_context_resource_functions(struct pipe_context *ctx)
{
   ctx->buffer_map = virgl_resource_transfer_map;
   ctx->texture_map = virgl_texture_transfer_map;
   ctx->transfer_flush_region = virgl_buffer_transfer_flush_region;
   ctx->buffer_unmap = virgl_buffer_transfer_unmap;
   ctx->texture_unmap = virgl_texture_transfer_unmap;
   ctx->buffer_subdata = virgl_buffer_subdata;
   ctx->texture_subdata = u_default_texture_subdata;
}

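/* Allocate a virgl_transfer from the context's slab pool and compute the
 * byte offset of the requested box within the guest layout described by
 * 'metadata'.
 */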
struct virgl_transfer *
virgl_resource_create_transfer(struct virgl_context *vctx,
                               struct pipe_resource *pres,
                               const struct virgl_resource_metadata *metadata,
                               unsigned level, unsigned usage,
                               const struct pipe_box *box)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_transfer *trans;
   enum pipe_format format = pres->format;
   const unsigned blocksy = box->y / util_format_get_blockheight(format);
   const unsigned blocksx = box->x / util_format_get_blockwidth(format);

   unsigned offset = metadata->plane_offset + metadata->level_offset[level];
   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      offset += box->z * metadata->layer_stride[level];
   }
   else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      offset += box->z * metadata->stride[level];
      assert(box->y == 0);
   } else if (pres->target == PIPE_BUFFER) {
      assert(box->y == 0 && box->z == 0);
   } else {
      assert(box->z == 0);
   }

   offset += blocksy * metadata->stride[level];
   offset += blocksx * util_format_get_blocksize(format);

   trans = slab_alloc(&vctx->transfer_pool);
   if (!trans)
      return NULL;

   /* note that trans is not zero-initialized */
   trans->base.resource = NULL;
   pipe_resource_reference(&trans->base.resource, pres);
   trans->hw_res = NULL;
   vws->resource_reference(vws, &trans->hw_res, virgl_resource(pres)->hw_res);

   trans->base.level = level;
   trans->base.usage = usage;
   trans->base.box = *box;
   trans->base.stride = metadata->stride[level];
   trans->base.layer_stride = metadata->layer_stride[level];
   trans->offset = offset;
   util_range_init(&trans->range);
   trans->copy_src_hw_res = NULL;
   trans->copy_src_offset = 0;
   trans->resolve_transfer = NULL;

   if (trans->base.resource->target != PIPE_TEXTURE_3D &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE &&
       trans->base.resource->target != PIPE_TEXTURE_1D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_2D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY)
      trans->l_stride = 0;
   else
      trans->l_stride = trans->base.layer_stride;

   return trans;
}

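/* Release the references held by a transfer and return it to the slab pool. */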
void virgl_resource_destroy_transfer(struct virgl_context *vctx,
                                     struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;

   vws->resource_reference(vws, &trans->copy_src_hw_res, NULL);

   util_range_destroy(&trans->range);
   vws->resource_reference(vws, &trans->hw_res, NULL);
   pipe_resource_reference(&trans->base.resource, NULL);
   slab_free(&vctx->transfer_pool, trans);
}

void virgl_resource_destroy(struct pipe_screen *screen,
                            struct pipe_resource *resource)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->b.target == PIPE_BUFFER)
      util_range_destroy(&res->valid_buffer_range);

   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
   FREE(res);
}

bool virgl_resource_get_handle(struct pipe_screen *screen,
                               struct pipe_context *context,
                               struct pipe_resource *resource,
                               struct winsys_handle *whandle,
                               unsigned usage)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->b.target == PIPE_BUFFER)
      return false;

   return vs->vws->resource_get_handle(vs->vws, res->hw_res,
                                       res->metadata.stride[0],
                                       whandle);
}

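/* Clear the clean bit of a level (bit 0 for buffers) so that subsequent maps
 * of that level perform a readback, see virgl_res_needs_readback().
 */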
void virgl_resource_dirty(struct virgl_resource *res, uint32_t level)
{
   if (res) {
      if (res->b.target == PIPE_BUFFER)
         res->clean_mask &= ~1;
      else
         res->clean_mask &= ~(1 << level);
   }
}