Path: blob/21.2-virgl/src/gallium/drivers/virgl/virgl_resource.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "virgl_context.h"
#include "virgl_resource.h"
#include "virgl_screen.h"
#include "virgl_staging_mgr.h"

/* A (soft) limit for the amount of memory we want to allow for queued staging
 * resources. This is used to decide when we should force a flush, in order to
 * avoid exhausting virtio-gpu memory.
 */
#define VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT (128 * 1024 * 1024)

enum virgl_transfer_map_type {
   VIRGL_TRANSFER_MAP_ERROR = -1,
   VIRGL_TRANSFER_MAP_HW_RES,

   /* Map a range of a staging buffer. The updated contents should be
    * transferred with a copy transfer.
    */
   VIRGL_TRANSFER_MAP_STAGING,

   /* Reallocate the underlying virgl_hw_res. */
   VIRGL_TRANSFER_MAP_REALLOC,
};

/* We need to flush to properly sync the transfer with the current cmdbuf.
 * But there are cases where the flushing can be skipped:
 *
 * - synchronization is disabled
 * - the resource is not referenced by the current cmdbuf
 */
static bool virgl_res_needs_flush(struct virgl_context *vctx,
                                  struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res = virgl_resource(trans->base.resource);

   if (trans->base.usage & PIPE_MAP_UNSYNCHRONIZED)
      return false;

   if (!vws->res_is_referenced(vws, vctx->cbuf, res->hw_res))
      return false;

   return true;
}

/* We need to read back from the host storage to make sure the guest storage
 * is up-to-date. But there are cases where the readback can be skipped:
 *
 * - the content can be discarded
 * - the host storage is read-only
 *
 * Note that PIPE_MAP_WRITE without discard bits requires readback.
 * PIPE_MAP_READ becomes irrelevant. PIPE_MAP_UNSYNCHRONIZED and
 * PIPE_MAP_FLUSH_EXPLICIT are also irrelevant.
 */
static bool virgl_res_needs_readback(struct virgl_context *vctx,
                                     struct virgl_resource *res,
                                     unsigned usage, unsigned level)
{
   if (usage & (PIPE_MAP_DISCARD_RANGE |
                PIPE_MAP_DISCARD_WHOLE_RESOURCE))
      return false;

   if (res->clean_mask & (1 << level))
      return false;

   return true;
}
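/* Decide how the transfer below will be serviced, and carry out whatever
 * flush, readback and wait the decision requires. Returns
 * VIRGL_TRANSFER_MAP_ERROR when the map cannot proceed: either the host
 * storage was requested directly (PIPE_MAP_DIRECTLY), or PIPE_MAP_DONTBLOCK
 * was set and servicing the transfer would have to block.
 */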
static enum virgl_transfer_map_type
virgl_resource_transfer_prepare(struct virgl_context *vctx,
                                struct virgl_transfer *xfer)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
   struct virgl_winsys *vws = vs->vws;
   struct virgl_resource *res = virgl_resource(xfer->base.resource);
   enum virgl_transfer_map_type map_type = VIRGL_TRANSFER_MAP_HW_RES;
   bool flush;
   bool readback;
   bool wait;

   /* there is no way to map the host storage currently */
   if (xfer->base.usage & PIPE_MAP_DIRECTLY)
      return VIRGL_TRANSFER_MAP_ERROR;

   /* We break the logic down into four steps
    *
    * step 1: determine the required operations independently
    * step 2: look for chances to skip the operations
    * step 3: resolve dependencies between the operations
    * step 4: execute the operations
    */

   flush = virgl_res_needs_flush(vctx, xfer);
   readback = virgl_res_needs_readback(vctx, res, xfer->base.usage,
                                       xfer->base.level);
   /* We need to wait for all cmdbufs, current or previous, that access the
    * resource to finish unless synchronization is disabled.
    */
   wait = !(xfer->base.usage & PIPE_MAP_UNSYNCHRONIZED);

   /* When the transfer range consists of only uninitialized data, we can
    * assume the GPU is not accessing the range and readback is unnecessary.
    * We can proceed as if PIPE_MAP_UNSYNCHRONIZED and
    * PIPE_MAP_DISCARD_RANGE are set.
    */
   if (res->b.target == PIPE_BUFFER &&
       !util_ranges_intersect(&res->valid_buffer_range, xfer->base.box.x,
                              xfer->base.box.x + xfer->base.box.width) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER))) {
      flush = false;
      readback = false;
      wait = false;
   }

   /* When the resource is busy but its content can be discarded, we can
    * replace its HW resource or use a staging buffer to avoid waiting.
    */
   if (wait &&
       (xfer->base.usage & (PIPE_MAP_DISCARD_RANGE |
                            PIPE_MAP_DISCARD_WHOLE_RESOURCE)) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER))) {
      bool can_realloc = false;
      bool can_staging = false;

      /* A PIPE_MAP_DISCARD_WHOLE_RESOURCE transfer may be followed by
       * PIPE_MAP_UNSYNCHRONIZED transfers to non-overlapping regions.
       * It cannot be treated as a PIPE_MAP_DISCARD_RANGE transfer,
       * otherwise those following unsynchronized transfers may overwrite
       * valid data.
       */
      if (xfer->base.usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
         can_realloc = virgl_can_rebind_resource(vctx, &res->b);
      } else {
         can_staging = vctx->supports_staging;
      }

      /* discard implies no readback */
      assert(!readback);

      if (can_realloc || can_staging) {
         /* Both map types have some costs. Do them only when the resource is
          * (or will be) busy for real. Otherwise, set wait to false.
          */
         wait = (flush || vws->resource_is_busy(vws, res->hw_res));
         if (wait) {
            map_type = (can_realloc) ?
               VIRGL_TRANSFER_MAP_REALLOC :
               VIRGL_TRANSFER_MAP_STAGING;
            wait = false;

            /* There is normally no need to flush either, unless the amount of
             * memory we are using for staging resources starts growing, in
             * which case we want to flush to keep our memory consumption in
             * check.
             */
            flush = (vctx->queued_staging_res_size >
                     VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT);
         }
      }
   }

   /* readback has some implications */
   if (readback) {
      /* Readback is yet another command and is transparent to the state
       * trackers. It should be waited for in all cases, including when
       * PIPE_MAP_UNSYNCHRONIZED is set.
       */
      wait = true;

      /* When the transfer queue has pending writes to this transfer's region,
       * we have to flush before readback.
       */
      if (!flush && virgl_transfer_queue_is_queued(&vctx->queue, xfer))
         flush = true;
   }

   if (flush)
      vctx->base.flush(&vctx->base, NULL, 0);

   /* If we are not allowed to block, and we know that we will have to wait,
    * either because the resource is busy, or because it will become busy due
    * to a readback, return early to avoid performing an incomplete
    * transfer_get. Such an incomplete transfer_get may finish at any time,
    * during which another unsynchronized map could write to the resource
    * contents, leaving the contents in an undefined state.
    */
   if ((xfer->base.usage & PIPE_MAP_DONTBLOCK) &&
       (readback || (wait && vws->resource_is_busy(vws, res->hw_res))))
      return VIRGL_TRANSFER_MAP_ERROR;

   if (readback) {
      vws->transfer_get(vws, res->hw_res, &xfer->base.box, xfer->base.stride,
                        xfer->l_stride, xfer->offset, xfer->base.level);
   }

   if (wait)
      vws->resource_wait(vws, res->hw_res);

   return map_type;
}
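/* Worked example for virgl_transfer_map_size() below (hypothetical values):
 * mapping a 16x16 box of a PIPE_TEXTURE_2D in PIPE_FORMAT_B8G8R8A8_UNORM
 * (1x1 blocks of 4 bytes) gives stride = 16 * 4 = 64 bytes and
 * layer_stride = 16 * 64 = 1024 bytes; a plain 2D texture takes the final
 * "else" branch, so size = layer_stride = 1024 bytes.
 */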
/* Calculate the minimum size of the memory required to service a resource
 * transfer map. Also return the stride and layer_stride for the corresponding
 * layout.
 */
static unsigned
virgl_transfer_map_size(struct virgl_transfer *vtransfer,
                        unsigned *out_stride,
                        unsigned *out_layer_stride)
{
   struct pipe_resource *pres = vtransfer->base.resource;
   struct pipe_box *box = &vtransfer->base.box;
   unsigned stride;
   unsigned layer_stride;
   unsigned size;

   assert(out_stride);
   assert(out_layer_stride);

   stride = util_format_get_stride(pres->format, box->width);
   layer_stride = util_format_get_2d_size(pres->format, stride, box->height);

   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      size = box->depth * layer_stride;
   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      size = box->depth * stride;
   } else {
      size = layer_stride;
   }

   *out_stride = stride;
   *out_layer_stride = layer_stride;

   return size;
}
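/* Example for virgl_staging_map() below (hypothetical numbers, assuming
 * VIRGL_MAP_BUFFER_ALIGNMENT is 64): a buffer transfer with box.x = 100 and
 * size = 200 gives align_offset = 100 % 64 = 36, so 236 bytes are allocated
 * from the staging buffer at a 64-byte-aligned offset; copy_src_offset and
 * the returned map address are then advanced by 36 bytes, so the staged data
 * keeps the same alignment relative to VIRGL_MAP_BUFFER_ALIGNMENT that it
 * will have in the destination buffer.
 */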
/* Maps a region from staging to service the transfer. */
static void *
virgl_staging_map(struct virgl_context *vctx,
                  struct virgl_transfer *vtransfer)
{
   struct virgl_resource *vres = virgl_resource(vtransfer->base.resource);
   unsigned size;
   unsigned align_offset;
   unsigned stride;
   unsigned layer_stride;
   void *map_addr;
   bool alloc_succeeded;

   assert(vctx->supports_staging);

   size = virgl_transfer_map_size(vtransfer, &stride, &layer_stride);

   /* For buffers we need to ensure that the start of the buffer would be
    * aligned to VIRGL_MAP_BUFFER_ALIGNMENT, even if our transfer doesn't
    * actually include it. To achieve this we may need to allocate a slightly
    * larger range from the upload buffer, and later update the uploader
    * resource offset and map address to point to the requested x coordinate
    * within that range.
    *
    * 0       A       2A      3A
    * |-------|---bbbb|bbbbb--|
    *             |--------|      ==> size
    *         |---|               ==> align_offset
    *         |------------|      ==> allocation of size + align_offset
    */
   align_offset = vres->b.target == PIPE_BUFFER ?
                  vtransfer->base.box.x % VIRGL_MAP_BUFFER_ALIGNMENT :
                  0;

   alloc_succeeded =
      virgl_staging_alloc(&vctx->staging, size + align_offset,
                          VIRGL_MAP_BUFFER_ALIGNMENT,
                          &vtransfer->copy_src_offset,
                          &vtransfer->copy_src_hw_res,
                          &map_addr);
   if (alloc_succeeded) {
      /* Update source offset and address to point to the requested x
       * coordinate if we have an align_offset (see above for more
       * information). */
      vtransfer->copy_src_offset += align_offset;
      map_addr += align_offset;

      /* Mark as dirty, since we are updating the host side resource
       * without going through the corresponding guest side resource, and
       * hence the two will diverge.
       */
      virgl_resource_dirty(vres, vtransfer->base.level);

      /* We are using the minimum required size to hold the contents,
       * possibly using a layout different from the layout of the resource,
       * so update the transfer strides accordingly.
       */
      vtransfer->base.stride = stride;
      vtransfer->base.layer_stride = layer_stride;
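/* Replace the backing virgl_hw_res with a freshly allocated one of the same
 * layout, so a PIPE_MAP_DISCARD_WHOLE_RESOURCE map need not wait for the
 * busy old storage. The resource must be rebindable; this is what
 * virgl_resource_transfer_prepare() checks via virgl_can_rebind_resource()
 * before selecting VIRGL_TRANSFER_MAP_REALLOC.
 */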
      /* Track the total size of active staging resources. */
      vctx->queued_staging_res_size += size + align_offset;
   }

   return map_addr;
}
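/* Sketch of how the entry point below is typically reached (illustrative
 * only; error handling omitted, and 'data', 'size' and 'box' are
 * placeholders). virgl_init_context_resource_functions() installs it as
 * ctx->buffer_map, so a state tracker writing a buffer range does roughly:
 *
 *    struct pipe_transfer *xfer;
 *    void *map = ctx->buffer_map(ctx, res, 0,
 *                                PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
 *                                &box, &xfer);
 *    memcpy(map, data, size);
 *    ctx->buffer_unmap(ctx, xfer);
 */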
static bool
virgl_resource_realloc(struct virgl_context *vctx, struct virgl_resource *res)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
   const struct pipe_resource *templ = &res->b;
   unsigned vbind, vflags;
   struct virgl_hw_res *hw_res;

   vbind = pipe_to_virgl_bind(vs, templ->bind);
   vflags = pipe_to_virgl_flags(vs, templ->flags);
   hw_res = vs->vws->resource_create(vs->vws,
                                     templ->target,
                                     templ->format,
                                     vbind,
                                     templ->width0,
                                     templ->height0,
                                     templ->depth0,
                                     templ->array_size,
                                     templ->last_level,
                                     templ->nr_samples,
                                     vflags,
                                     res->metadata.total_size);
   if (!hw_res)
      return false;

   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
   res->hw_res = hw_res;

   /* We can safely clear the range here, since it will be repopulated in the
    * following rebind operation, according to the active buffer binds.
    */
   util_range_set_empty(&res->valid_buffer_range);

   /* count toward the staging resource size limit */
   vctx->queued_staging_res_size += res->metadata.total_size;

   virgl_rebind_resource(vctx, &res->b);

   return true;
}

void *
virgl_resource_transfer_map(struct pipe_context *ctx,
                            struct pipe_resource *resource,
                            unsigned level,
                            unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **transfer)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_winsys *vws = virgl_screen(ctx->screen)->vws;
   struct virgl_resource *vres = virgl_resource(resource);
   struct virgl_transfer *trans;
   enum virgl_transfer_map_type map_type;
   void *map_addr;

   /* Multisampled resources require resolve before mapping. */
   assert(resource->nr_samples <= 1);

   trans = virgl_resource_create_transfer(vctx, resource,
                                          &vres->metadata, level, usage, box);

   map_type = virgl_resource_transfer_prepare(vctx, trans);
   switch (map_type) {
   case VIRGL_TRANSFER_MAP_REALLOC:
      if (!virgl_resource_realloc(vctx, vres)) {
         map_addr = NULL;
         break;
      }
      vws->resource_reference(vws, &trans->hw_res, vres->hw_res);
      FALLTHROUGH;
   case VIRGL_TRANSFER_MAP_HW_RES:
      trans->hw_res_map = vws->resource_map(vws, vres->hw_res);
      if (trans->hw_res_map)
         map_addr = trans->hw_res_map + trans->offset;
      else
         map_addr = NULL;
      break;
   case VIRGL_TRANSFER_MAP_STAGING:
      map_addr = virgl_staging_map(vctx, trans);
      /* Copy transfers don't make use of hw_res_map at the moment. */
      trans->hw_res_map = NULL;
      break;
   case VIRGL_TRANSFER_MAP_ERROR:
   default:
      trans->hw_res_map = NULL;
      map_addr = NULL;
      break;
   }

   if (!map_addr) {
      virgl_resource_destroy_transfer(vctx, trans);
      return NULL;
   }

   if (vres->b.target == PIPE_BUFFER) {
      /* For the checks below to be able to use 'usage', we assume that
       * transfer preparation doesn't affect the usage.
       */
      assert(usage == trans->base.usage);

      /* If we are doing a whole resource discard with a hw_res map, the
       * buffer storage can now be considered unused and we don't care about
       * previous contents. We can thus mark the storage as uninitialized,
       * but only if the buffer is not host writable (in which case we can't
       * clear the valid range, since that would result in missed readbacks
       * in future transfers). We only do this for VIRGL_TRANSFER_MAP_HW_RES,
       * since for VIRGL_TRANSFER_MAP_REALLOC we already take care of the
       * buffer range when reallocating and rebinding, and
       * VIRGL_TRANSFER_MAP_STAGING is not currently used for whole resource
       * discards.
       */
      if (map_type == VIRGL_TRANSFER_MAP_HW_RES &&
          (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) &&
          (vres->clean_mask & 1)) {
         util_range_set_empty(&vres->valid_buffer_range);
      }

      if (usage & PIPE_MAP_WRITE)
         util_range_add(&vres->b, &vres->valid_buffer_range, box->x,
                        box->x + box->width);
   }

   *transfer = &trans->base;
   return map_addr;
}
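/* Worked example for virgl_resource_layout() below (hypothetical values):
 * a 64x64 PIPE_TEXTURE_2D with last_level = 1 in a 4-byte format, with no
 * winsys_stride override, is laid out as
 *
 *    level 0: stride = 256, layer_stride = 16384, level_offset = 0
 *    level 1: stride = 128, layer_stride = 4096,  level_offset = 16384
 *
 * for a total_size of 20480 bytes.
 */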
static void virgl_resource_layout(struct pipe_resource *pt,
                                  struct virgl_resource_metadata *metadata,
                                  uint32_t plane,
                                  uint32_t winsys_stride,
                                  uint32_t plane_offset,
                                  uint64_t modifier)
{
   unsigned level, nblocksy;
   unsigned width = pt->width0;
   unsigned height = pt->height0;
   unsigned depth = pt->depth0;
   unsigned buffer_size = 0;

   for (level = 0; level <= pt->last_level; level++) {
      unsigned slices;

      if (pt->target == PIPE_TEXTURE_CUBE)
         slices = 6;
      else if (pt->target == PIPE_TEXTURE_3D)
         slices = depth;
      else
         slices = pt->array_size;

      nblocksy = util_format_get_nblocksy(pt->format, height);
      metadata->stride[level] = winsys_stride ? winsys_stride :
                                util_format_get_stride(pt->format, width);
      metadata->layer_stride[level] = nblocksy * metadata->stride[level];
      metadata->level_offset[level] = buffer_size;

      buffer_size += slices * metadata->layer_stride[level];

      width = u_minify(width, 1);
      height = u_minify(height, 1);
      depth = u_minify(depth, 1);
   }

   metadata->plane = plane;
   metadata->plane_offset = plane_offset;
   metadata->modifier = modifier;
   if (pt->nr_samples <= 1)
      metadata->total_size = buffer_size;
   else /* don't create guest backing store for MSAA */
      metadata->total_size = 0;
}

static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
                                                   const struct pipe_resource *templ)
{
   unsigned vbind, vflags;
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);

   res->b = *templ;
   res->b.screen = &vs->base;
   pipe_reference_init(&res->b.reference, 1);
   vbind = pipe_to_virgl_bind(vs, templ->bind);
   vflags = pipe_to_virgl_flags(vs, templ->flags);
   virgl_resource_layout(&res->b, &res->metadata, 0, 0, 0, 0);

   if ((vs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT) &&
       vs->tweak_gles_emulate_bgra &&
       (templ->format == PIPE_FORMAT_B8G8R8A8_SRGB ||
        templ->format == PIPE_FORMAT_B8G8R8A8_UNORM ||
        templ->format == PIPE_FORMAT_B8G8R8X8_SRGB ||
        templ->format == PIPE_FORMAT_B8G8R8X8_UNORM)) {
      vbind |= VIRGL_BIND_PREFER_EMULATED_BGRA;
   }

   res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
                                          templ->format, vbind,
                                          templ->width0,
                                          templ->height0,
                                          templ->depth0,
                                          templ->array_size,
                                          templ->last_level,
                                          templ->nr_samples,
                                          vflags,
                                          res->metadata.total_size);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   res->clean_mask = (1 << VR_MAX_TEXTURE_2D_LEVELS) - 1;

   if (templ->target == PIPE_BUFFER) {
      util_range_init(&res->valid_buffer_range);
      virgl_buffer_init(res);
   } else {
      virgl_texture_init(res);
   }

   return &res->b;
}
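/* Import a resource shared by another process or device (e.g. via a dma-buf)
 * from a winsys handle. Buffers cannot be imported this way. For blob
 * resources the winsys-provided stride/offset/modifier describe the plane;
 * an untyped blob resource is assigned a type once all of its planes are
 * known.
 */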
static struct pipe_resource *virgl_resource_from_handle(struct pipe_screen *screen,
                                                        const struct pipe_resource *templ,
                                                        struct winsys_handle *whandle,
                                                        unsigned usage)
{
   uint32_t winsys_stride, plane_offset, plane;
   uint64_t modifier;
   struct virgl_screen *vs = virgl_screen(screen);
   if (templ->target == PIPE_BUFFER)
      return NULL;

   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
   res->b = *templ;
   res->b.screen = &vs->base;
   pipe_reference_init(&res->b.reference, 1);

   plane = winsys_stride = plane_offset = modifier = 0;
   res->hw_res = vs->vws->resource_create_from_handle(vs->vws, whandle,
                                                      &plane,
                                                      &winsys_stride,
                                                      &plane_offset,
                                                      &modifier,
                                                      &res->blob_mem);

   /* do not use winsys returns for guest storage info of classic resource */
   if (!res->blob_mem) {
      winsys_stride = 0;
      plane_offset = 0;
      modifier = 0;
   }

   virgl_resource_layout(&res->b, &res->metadata, plane, winsys_stride,
                         plane_offset, modifier);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   /* assign blob resource a type in case it was created untyped */
   if (res->blob_mem && plane == 0 &&
       (vs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_UNTYPED_RESOURCE)) {
      uint32_t plane_strides[VIRGL_MAX_PLANE_COUNT];
      uint32_t plane_offsets[VIRGL_MAX_PLANE_COUNT];
      uint32_t plane_count = 0;
      struct pipe_resource *iter = &res->b;

      do {
         struct virgl_resource *plane = virgl_resource(iter);

         /* must be a plain 2D texture sharing the same hw_res */
         if (plane->b.target != PIPE_TEXTURE_2D ||
             plane->b.depth0 != 1 ||
             plane->b.array_size != 1 ||
             plane->b.last_level != 0 ||
             plane->b.nr_samples > 1 ||
             plane->hw_res != res->hw_res ||
             plane_count >= VIRGL_MAX_PLANE_COUNT) {
            vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
            FREE(res);
            return NULL;
         }

         plane_strides[plane_count] = plane->metadata.stride[0];
         plane_offsets[plane_count] = plane->metadata.plane_offset;
         plane_count++;
         iter = iter->next;
      } while (iter);

      vs->vws->resource_set_type(vs->vws,
                                 res->hw_res,
                                 pipe_to_virgl_format(res->b.format),
                                 pipe_to_virgl_bind(vs, res->b.bind),
                                 res->b.width0,
                                 res->b.height0,
                                 usage,
                                 res->metadata.modifier,
                                 plane_count,
                                 plane_strides,
                                 plane_offsets);
   }

   virgl_texture_init(res);

   return &res->b;
}

void virgl_init_screen_resource_functions(struct pipe_screen *screen)
{
   screen->resource_create = virgl_resource_create;
   screen->resource_from_handle = virgl_resource_from_handle;
   screen->resource_get_handle = virgl_resource_get_handle;
   screen->resource_destroy = virgl_resource_destroy;
}

static void virgl_buffer_subdata(struct pipe_context *pipe,
                                 struct pipe_resource *resource,
                                 unsigned usage, unsigned offset,
                                 unsigned size, const void *data)
{
   struct virgl_context *vctx = virgl_context(pipe);
   struct virgl_resource *vbuf = virgl_resource(resource);

   /* We can try virgl_transfer_queue_extend_buffer when there is no
    * flush/readback/wait required. Based on virgl_resource_transfer_prepare,
    * the simplest way to make sure that is the case is to check the valid
    * buffer range.
    */
   if (!util_ranges_intersect(&vbuf->valid_buffer_range,
                              offset, offset + size) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER)) &&
       virgl_transfer_queue_extend_buffer(&vctx->queue,
                                          vbuf->hw_res, offset, size, data)) {
      util_range_add(&vbuf->b, &vbuf->valid_buffer_range, offset,
                     offset + size);
      return;
   }

   u_default_buffer_subdata(pipe, resource, usage, offset, size, data);
}

void virgl_init_context_resource_functions(struct pipe_context *ctx)
{
   ctx->buffer_map = virgl_resource_transfer_map;
   ctx->texture_map = virgl_texture_transfer_map;
   ctx->transfer_flush_region = virgl_buffer_transfer_flush_region;
   ctx->buffer_unmap = virgl_buffer_transfer_unmap;
   ctx->texture_unmap = virgl_texture_transfer_unmap;
   ctx->buffer_subdata = virgl_buffer_subdata;
   ctx->texture_subdata = u_default_texture_subdata;
}

struct virgl_transfer *
virgl_resource_create_transfer(struct virgl_context *vctx,
                               struct pipe_resource *pres,
                               const struct virgl_resource_metadata *metadata,
                               unsigned level, unsigned usage,
                               const struct pipe_box *box)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_transfer *trans;
   enum pipe_format format = pres->format;
   const unsigned blocksy = box->y / util_format_get_blockheight(format);
   const unsigned blocksx = box->x / util_format_get_blockwidth(format);

   unsigned offset = metadata->plane_offset + metadata->level_offset[level];
   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      offset += box->z * metadata->layer_stride[level];
   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      offset += box->z * metadata->stride[level];
      assert(box->y == 0);
   } else if (pres->target == PIPE_BUFFER) {
      assert(box->y == 0 && box->z == 0);
   } else {
      assert(box->z == 0);
   }

   offset += blocksy * metadata->stride[level];
   offset += blocksx * util_format_get_blocksize(format);

   trans = slab_alloc(&vctx->transfer_pool);
   if (!trans)
      return NULL;

   /* note that trans is not zero-initialized */
   trans->base.resource = NULL;
   pipe_resource_reference(&trans->base.resource, pres);
   trans->hw_res = NULL;
   vws->resource_reference(vws, &trans->hw_res, virgl_resource(pres)->hw_res);

   trans->base.level = level;
   trans->base.usage = usage;
   trans->base.box = *box;
   trans->base.stride = metadata->stride[level];
   trans->base.layer_stride = metadata->layer_stride[level];
   trans->offset = offset;
   util_range_init(&trans->range);
   trans->copy_src_hw_res = NULL;
   trans->copy_src_offset = 0;
   trans->resolve_transfer = NULL;

   if (trans->base.resource->target != PIPE_TEXTURE_3D &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE &&
       trans->base.resource->target != PIPE_TEXTURE_1D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_2D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY)
      trans->l_stride = 0;
   else
      trans->l_stride = trans->base.layer_stride;

   return trans;
}
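/* Example of the offset math in virgl_resource_create_transfer() above
 * (hypothetical values): for level 1 of the 64x64 texture from the
 * virgl_resource_layout() example, a box at (x, y) = (8, 4) gives
 * offset = level_offset[1] + 4 * stride[1] + 8 * 4
 *        = 16384 + 512 + 32 = 16928 bytes (plane_offset = 0, 4-byte blocks).
 */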
void virgl_resource_destroy_transfer(struct virgl_context *vctx,
                                     struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;

   vws->resource_reference(vws, &trans->copy_src_hw_res, NULL);

   util_range_destroy(&trans->range);
   vws->resource_reference(vws, &trans->hw_res, NULL);
   pipe_resource_reference(&trans->base.resource, NULL);
   slab_free(&vctx->transfer_pool, trans);
}

void virgl_resource_destroy(struct pipe_screen *screen,
                            struct pipe_resource *resource)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->b.target == PIPE_BUFFER)
      util_range_destroy(&res->valid_buffer_range);

   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
   FREE(res);
}

bool virgl_resource_get_handle(struct pipe_screen *screen,
                               struct pipe_context *context,
                               struct pipe_resource *resource,
                               struct winsys_handle *whandle,
                               unsigned usage)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->b.target == PIPE_BUFFER)
      return false;

   return vs->vws->resource_get_handle(vs->vws, res->hw_res,
                                       res->metadata.stride[0],
                                       whandle);
}

void virgl_resource_dirty(struct virgl_resource *res, uint32_t level)
{
   if (res) {
      if (res->b.target == PIPE_BUFFER)
         res->clean_mask &= ~1;
      else
         res->clean_mask &= ~(1 << level);
   }
}