Path: blob/21.2-virgl/src/gallium/drivers/r600/r600_texture.c
4570 views
/*
 * Copyright 2010 Jerome Glisse <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 *      Corbin Simpson
 */
#include "r600_pipe_common.h"
#include "r600_cs.h"
#include "r600_query.h"
#include "util/format/u_format.h"
#include "util/u_log.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include "util/u_surface.h"
#include "util/os_time.h"
#include "frontend/winsys_handle.h"
#include <errno.h>
#include <inttypes.h>

/* Forward declarations for helpers defined later in this file. */
static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
                                       struct r600_texture *rtex);
static enum radeon_surf_mode
r600_choose_tiling(struct r600_common_screen *rscreen,
                   const struct pipe_resource *templ);


/* Check whether a copy between two textures can be done on the SDMA ring,
 * and prepare both textures (CMASK discard / decompression flush) so that
 * the copy is safe.  Returns false when the caller must fall back to the
 * 3D (blit) path instead.
 */
bool r600_prepare_for_dma_blit(struct r600_common_context *rctx,
                               struct r600_texture *rdst,
                               unsigned dst_level, unsigned dstx,
                               unsigned dsty, unsigned dstz,
                               struct r600_texture *rsrc,
                               unsigned src_level,
                               const struct pipe_box *src_box)
{
    /* No SDMA command stream available on this context. */
    if (!rctx->dma.cs.priv)
        return false;

    /* SDMA cannot convert between different bytes-per-element. */
    if (rdst->surface.bpe != rsrc->surface.bpe)
        return false;

    /* MSAA: Blits don't exist in the real world. */
    if (rsrc->resource.b.b.nr_samples > 1 ||
        rdst->resource.b.b.nr_samples > 1)
        return false;

    /* Depth-stencil surfaces:
     *   When dst is linear, the DB->CB copy preserves HTILE.
     *   When dst is tiled, the 3D path must be used to update HTILE.
     */
    if (rsrc->is_depth || rdst->is_depth)
        return false;

    /* CMASK as:
     *   src: Both texture and SDMA paths need decompression. Use SDMA.
     *   dst: If overwriting the whole texture, discard CMASK and use
     *        SDMA. Otherwise, use the 3D path.
     */
    if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
        /* The CMASK clear is only enabled for the first level. */
        assert(dst_level == 0);
        if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
                                              dstx, dsty, dstz, src_box->width,
                                              src_box->height, src_box->depth))
            return false;

        r600_texture_discard_cmask(rctx->screen, rdst);
    }

    /* All requirements are met. Prepare textures for SDMA. */
    if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
        rctx->b.flush_resource(&rctx->b, &rsrc->resource.b.b);

    assert(!(rsrc->dirty_level_mask & (1 << src_level)));
    assert(!(rdst->dirty_level_mask & (1 << dst_level)));

    return true;
}

/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void r600_copy_region_with_blit(struct pipe_context *pipe,
                                       struct pipe_resource *dst,
                                       unsigned dst_level,
                                       unsigned dstx, unsigned dsty, unsigned dstz,
                                       struct pipe_resource *src,
                                       unsigned src_level,
                                       const struct pipe_box *src_box)
{
    struct pipe_blit_info blit;

    memset(&blit, 0, sizeof(blit));
    blit.src.resource = src;
    blit.src.format = src->format;
    blit.src.level = src_level;
    blit.src.box = *src_box;
    blit.dst.resource = dst;
    blit.dst.format = dst->format;
    blit.dst.level = dst_level;
    blit.dst.box.x = dstx;
    blit.dst.box.y = dsty;
    blit.dst.box.z = dstz;
    blit.dst.box.width = src_box->width;
    blit.dst.box.height = src_box->height;
    blit.dst.box.depth = src_box->depth;
    /* Only blit channels present in both formats. */
    blit.mask = util_format_get_mask(src->format) &
                util_format_get_mask(dst->format);
    blit.filter = PIPE_TEX_FILTER_NEAREST;

    if (blit.mask) {
        pipe->blit(pipe, &blit);
    }
}

/* Copy from a full GPU texture to a transfer's staging one. */
static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
    struct r600_common_context *rctx = (struct r600_common_context*)ctx;
    struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
    struct pipe_resource *dst = &rtransfer->staging->b.b;
    struct pipe_resource *src = transfer->resource;

    /* MSAA sources need a resolving blit; the DMA engine can't do it. */
    if (src->nr_samples > 1) {
        r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
                                   src, transfer->level, &transfer->box);
        return;
    }

    rctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
                   &transfer->box);
}

/* Copy from a transfer's staging texture to a full GPU one. */
static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
    struct r600_common_context *rctx = (struct r600_common_context*)ctx;
    struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
    struct pipe_resource *dst = transfer->resource;
    struct pipe_resource *src = &rtransfer->staging->b.b;
    struct pipe_box sbox;

    u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);

    /* MSAA destinations need the blit path (see copy_to_staging). */
    if (dst->nr_samples > 1) {
        r600_copy_region_with_blit(ctx, dst, transfer->level,
                                   transfer->box.x, transfer->box.y, transfer->box.z,
                                   src, 0, &sbox);
        return;
    }

    rctx->dma_copy(ctx, dst, transfer->level,
                   transfer->box.x, transfer->box.y, transfer->box.z,
                   src, 0, &sbox);
}

/* Compute the byte offset of (level, box) inside the texture BO and return
 * the row stride and layer stride through the out parameters.
 * NOTE(review): the return type is unsigned while the computation is done
 * in uint64_t — presumably legacy-surface offsets always fit in 32 bits
 * here; confirm against the surface allocator before relying on it.
 */
static unsigned r600_texture_get_offset(struct r600_common_screen *rscreen,
                                        struct r600_texture *rtex, unsigned level,
                                        const struct pipe_box *box,
                                        unsigned *stride,
                                        unsigned *layer_stride)
{
    *stride = rtex->surface.u.legacy.level[level].nblk_x *
              rtex->surface.bpe;
    assert((uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 <= UINT_MAX);
    *layer_stride = (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4;

    if (!box)
        return (uint64_t)rtex->surface.u.legacy.level[level].offset_256B * 256;

    /* Each texture is an array of mipmap levels. Each level is
     * an array of slices. */
    return (uint64_t)rtex->surface.u.legacy.level[level].offset_256B * 256 +
           box->z * (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 +
           (box->y / rtex->surface.blk_h *
            rtex->surface.u.legacy.level[level].nblk_x +
            box->x / rtex->surface.blk_w) * rtex->surface.bpe;
}

/* Translate gallium resource state into radeon_surf flags and run the
 * winsys surface layout computation.  Returns 0 on success or the winsys
 * error code.  pitch_in_bytes_override / offset patch up layouts of
 * imported buffers whose stride/offset were chosen by another process.
 */
static int r600_init_surface(struct r600_common_screen *rscreen,
                             struct radeon_surf *surface,
                             const struct pipe_resource *ptex,
                             enum radeon_surf_mode array_mode,
                             unsigned pitch_in_bytes_override,
                             unsigned offset,
                             bool is_imported,
                             bool is_scanout,
                             bool is_flushed_depth)
{
    const struct util_format_description *desc =
        util_format_description(ptex->format);
    bool is_depth, is_stencil;
    int r;
    unsigned i, bpe, flags = 0;

    is_depth = util_format_has_depth(desc);
    is_stencil = util_format_has_stencil(desc);

    if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth &&
        ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
        bpe = 4; /* stencil is allocated separately on evergreen */
    } else {
        bpe = util_format_get_blocksize(ptex->format);
        assert(util_is_power_of_two_or_zero(bpe));
    }

    if (!is_flushed_depth && is_depth) {
        flags |= RADEON_SURF_ZBUFFER;

        if (is_stencil)
            flags |= RADEON_SURF_SBUFFER;
    }

    if (ptex->bind & PIPE_BIND_SCANOUT || is_scanout) {
        /* This should catch bugs in gallium users setting incorrect flags. */
        assert(ptex->nr_samples <= 1 &&
               ptex->array_size == 1 &&
               ptex->depth0 == 1 &&
               ptex->last_level == 0 &&
               !(flags & RADEON_SURF_Z_OR_SBUFFER));

        flags |= RADEON_SURF_SCANOUT;
    }

    if (ptex->bind & PIPE_BIND_SHARED)
        flags |= RADEON_SURF_SHAREABLE;
    if (is_imported)
        flags |= RADEON_SURF_IMPORTED | RADEON_SURF_SHAREABLE;

    r = rscreen->ws->surface_init(rscreen->ws, ptex,
                                  flags, bpe, array_mode, surface);
    if (r) {
        return r;
    }

    if (pitch_in_bytes_override &&
        pitch_in_bytes_override != surface->u.legacy.level[0].nblk_x * bpe) {
        /* old ddx on evergreen over estimate alignment for 1d, only 1 level
         * for those
         */
        surface->u.legacy.level[0].nblk_x = pitch_in_bytes_override / bpe;
        surface->u.legacy.level[0].slice_size_dw =
            ((uint64_t)pitch_in_bytes_override * surface->u.legacy.level[0].nblk_y) / 4;
    }

    if (offset) {
        /* Shift every level by the import offset (stored in 256B units). */
        for (i = 0; i < ARRAY_SIZE(surface->u.legacy.level); ++i)
            surface->u.legacy.level[i].offset_256B += offset / 256;
    }

    return 0;
}

/* Fill the winsys BO metadata (tiling parameters) from the surface so
 * other processes importing this BO can reconstruct the layout.
 */
static void r600_texture_init_metadata(struct r600_common_screen *rscreen,
                                       struct r600_texture *rtex,
                                       struct radeon_bo_metadata *metadata)
{
    struct radeon_surf *surface = &rtex->surface;

    memset(metadata, 0, sizeof(*metadata));

    metadata->u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ?
                                   RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
    metadata->u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ?
                                   RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
    metadata->u.legacy.pipe_config = surface->u.legacy.pipe_config;
    metadata->u.legacy.bankw = surface->u.legacy.bankw;
    metadata->u.legacy.bankh = surface->u.legacy.bankh;
    metadata->u.legacy.tile_split = surface->u.legacy.tile_split;
    metadata->u.legacy.mtilea = surface->u.legacy.mtilea;
    metadata->u.legacy.num_banks = surface->u.legacy.num_banks;
    metadata->u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe;
    metadata->u.legacy.scanout
/* (continuation of the r600_texture_init_metadata assignment above) */
        = (surface->flags & RADEON_SURF_SCANOUT) != 0;
}

/* Inverse of r600_texture_init_metadata: recover the array mode and
 * scanout flag from BO metadata attached by the exporting process.
 */
static void r600_surface_import_metadata(struct r600_common_screen *rscreen,
                                         struct radeon_surf *surf,
                                         struct radeon_bo_metadata *metadata,
                                         enum radeon_surf_mode *array_mode,
                                         bool *is_scanout)
{
    surf->u.legacy.pipe_config = metadata->u.legacy.pipe_config;
    surf->u.legacy.bankw = metadata->u.legacy.bankw;
    surf->u.legacy.bankh = metadata->u.legacy.bankh;
    surf->u.legacy.tile_split = metadata->u.legacy.tile_split;
    surf->u.legacy.mtilea = metadata->u.legacy.mtilea;
    surf->u.legacy.num_banks = metadata->u.legacy.num_banks;

    if (metadata->u.legacy.macrotile == RADEON_LAYOUT_TILED)
        *array_mode = RADEON_SURF_MODE_2D;
    else if (metadata->u.legacy.microtile == RADEON_LAYOUT_TILED)
        *array_mode = RADEON_SURF_MODE_1D;
    else
        *array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

    *is_scanout = metadata->u.legacy.scanout;
}

/* Resolve a pending fast color clear by flushing the resource, taking the
 * aux-context lock when the work runs on the screen's aux context.
 */
static void r600_eliminate_fast_color_clear(struct r600_common_context *rctx,
                                            struct r600_texture *rtex)
{
    struct r600_common_screen *rscreen = rctx->screen;
    struct pipe_context *ctx = &rctx->b;

    if (ctx == rscreen->aux_context)
        mtx_lock(&rscreen->aux_context_lock);

    ctx->flush_resource(ctx, &rtex->resource.b.b);
    ctx->flush(ctx, NULL, 0);

    if (ctx == rscreen->aux_context)
        mtx_unlock(&rscreen->aux_context_lock);
}

/* Drop the CMASK metadata (fast-clear state) of a texture and notify all
 * contexts so they re-validate their framebuffer/sampler state.
 */
static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
                                       struct r600_texture *rtex)
{
    if (!rtex->cmask.size)
        return;

    assert(rtex->resource.b.b.nr_samples <= 1);

    /* Disable CMASK. */
    memset(&rtex->cmask, 0, sizeof(rtex->cmask));
    rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;
    rtex->dirty_level_mask = 0;

    rtex->cb_color_info &= ~EG_S_028C70_FAST_CLEAR(1);

    if (rtex->cmask_buffer != &rtex->resource)
        r600_resource_reference(&rtex->cmask_buffer, NULL);

    /* Notify all contexts about the change. */
    p_atomic_inc(&rscreen->dirty_tex_counter);
    p_atomic_inc(&rscreen->compressed_colortex_counter);
}

/* Re-create the texture with an extra bind flag (e.g. PIPE_BIND_SHARED or
 * PIPE_BIND_LINEAR) and transplant the new storage into the existing
 * r600_texture, optionally copying the old contents over.
 */
static void r600_reallocate_texture_inplace(struct r600_common_context *rctx,
                                            struct r600_texture *rtex,
                                            unsigned new_bind_flag,
                                            bool invalidate_storage)
{
    struct pipe_screen *screen = rctx->b.screen;
    struct r600_texture *new_tex;
    struct pipe_resource templ = rtex->resource.b.b;
    unsigned i;

    templ.bind |= new_bind_flag;

    /* r600g doesn't react to dirty_tex_descriptor_counter */
    if (rctx->chip_class < GFX6)
        return;

    if (rtex->resource.b.is_shared)
        return;

    if (new_bind_flag == PIPE_BIND_LINEAR) {
        if (rtex->surface.is_linear)
            return;

        /* This fails with MSAA, depth, and compressed textures. */
        if (r600_choose_tiling(rctx->screen, &templ) !=
            RADEON_SURF_MODE_LINEAR_ALIGNED)
            return;
    }

    new_tex = (struct r600_texture*)screen->resource_create(screen, &templ);
    if (!new_tex)
        return;

    /* Copy the pixels to the new texture. */
    if (!invalidate_storage) {
        for (i = 0; i <= templ.last_level; i++) {
            struct pipe_box box;

            u_box_3d(0, 0, 0,
                     u_minify(templ.width0, i), u_minify(templ.height0, i),
                     util_num_layers(&templ, i), &box);

            rctx->dma_copy(&rctx->b, &new_tex->resource.b.b, i, 0, 0, 0,
                           &rtex->resource.b.b, i, &box);
        }
    }

    if (new_bind_flag == PIPE_BIND_LINEAR) {
        r600_texture_discard_cmask(rctx->screen, rtex);
    }

    /* Replace the structure fields of rtex. */
    rtex->resource.b.b.bind = templ.bind;
    pb_reference(&rtex->resource.buf, new_tex->resource.buf);
    rtex->resource.gpu_address = new_tex->resource.gpu_address;
    rtex->resource.vram_usage = new_tex->resource.vram_usage;
    rtex->resource.gart_usage = new_tex->resource.gart_usage;
    rtex->resource.bo_size = new_tex->resource.bo_size;
    rtex->resource.bo_alignment = new_tex->resource.bo_alignment;
    rtex->resource.domains = new_tex->resource.domains;
    rtex->resource.flags = new_tex->resource.flags;
    rtex->size = new_tex->size;
    rtex->db_render_format = new_tex->db_render_format;
    rtex->db_compatible = new_tex->db_compatible;
    rtex->can_sample_z = new_tex->can_sample_z;
    rtex->can_sample_s = new_tex->can_sample_s;
    rtex->surface = new_tex->surface;
    rtex->fmask = new_tex->fmask;
    rtex->cmask = new_tex->cmask;
    rtex->cb_color_info = new_tex->cb_color_info;
    rtex->last_msaa_resolve_target_micro_mode = new_tex->last_msaa_resolve_target_micro_mode;
    rtex->htile_offset = new_tex->htile_offset;
    rtex->depth_cleared = new_tex->depth_cleared;
    rtex->stencil_cleared = new_tex->stencil_cleared;
    rtex->non_disp_tiling = new_tex->non_disp_tiling;
    rtex->framebuffers_bound = new_tex->framebuffers_bound;

    if (new_bind_flag == PIPE_BIND_LINEAR) {
        assert(!rtex->htile_offset);
        assert(!rtex->cmask.size);
        assert(!rtex->fmask.size);
        assert(!rtex->is_depth);
    }

    r600_texture_reference(&new_tex, NULL);

    p_atomic_inc(&rctx->screen->dirty_tex_counter);
}

/* Report level-0 stride and offset for handle export; both are 0 for
 * buffers (PIPE_BUFFER has no 2D layout).
 */
static void r600_texture_get_info(struct pipe_screen* screen,
                                  struct pipe_resource *resource,
                                  unsigned *pstride,
                                  unsigned *poffset)
{
    struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
    struct r600_texture *rtex = (struct r600_texture*)resource;
    unsigned stride = 0;
    unsigned offset = 0;

    if (!rscreen || !rtex)
        return;

    if (resource->target != PIPE_BUFFER) {
        offset = (uint64_t)rtex->surface.u.legacy.level[0].offset_256B * 256;
        stride = rtex->surface.u.legacy.level[0].nblk_x *
                 rtex->surface.bpe;
    }

    if (pstride)
        *pstride = stride;

    if (poffset)
        *poffset = offset;
}

/* Export a winsys handle for sharing the resource with other processes.
 * Un-suballocates and decompresses the resource as needed so the receiver
 * sees plain, self-contained storage.
 */
static bool r600_texture_get_handle(struct pipe_screen* screen,
                                    struct pipe_context *ctx,
                                    struct pipe_resource *resource,
                                    struct winsys_handle *whandle,
                                    unsigned usage)
{
    struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
    struct r600_common_context *rctx;
    struct r600_resource *res = (struct r600_resource*)resource;
    struct r600_texture *rtex = (struct r600_texture*)resource;
    struct radeon_bo_metadata metadata;
    bool update_metadata = false;
    unsigned stride, offset, slice_size;

    ctx = threaded_context_unwrap_sync(ctx);
    rctx = (struct r600_common_context*)(ctx ? ctx : rscreen->aux_context);

    if (resource->target != PIPE_BUFFER) {
        /* This is not supported now, but it might be required for OpenCL
         * interop in the future.
         */
        if (resource->nr_samples > 1 || rtex->is_depth)
            return false;

        /* Move a suballocated texture into a non-suballocated allocation. */
        if (rscreen->ws->buffer_is_suballocated(res->buf) ||
            rtex->surface.tile_swizzle) {
            assert(!res->b.is_shared);
            r600_reallocate_texture_inplace(rctx, rtex,
                                            PIPE_BIND_SHARED, false);
            rctx->b.flush(&rctx->b, NULL, 0);
            assert(res->b.b.bind & PIPE_BIND_SHARED);
            assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
            assert(rtex->surface.tile_swizzle == 0);
        }

        if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
            rtex->cmask.size) {
            /* Eliminate fast clear (CMASK) */
            r600_eliminate_fast_color_clear(rctx, rtex);

            /* Disable CMASK if flush_resource isn't going
             * to be called.
             */
            if (rtex->cmask.size)
                r600_texture_discard_cmask(rscreen, rtex);
        }

        /* Set metadata. */
        if (!res->b.is_shared || update_metadata) {
            r600_texture_init_metadata(rscreen, rtex, &metadata);

            rscreen->ws->buffer_set_metadata(rscreen->ws, res->buf, &metadata, NULL);
        }

        slice_size = (uint64_t)rtex->surface.u.legacy.level[0].slice_size_dw * 4;
    } else {
        /* Move a suballocated buffer into a non-suballocated allocation. */
        if (rscreen->ws->buffer_is_suballocated(res->buf)) {
            assert(!res->b.is_shared);

            /* Allocate a new buffer with PIPE_BIND_SHARED. */
            struct pipe_resource templ = res->b.b;
            templ.bind |= PIPE_BIND_SHARED;

            struct pipe_resource *newb =
                screen->resource_create(screen, &templ);
            if (!newb)
                return false;

            /* Copy the old buffer contents to the new one. */
            struct pipe_box box;
            u_box_1d(0, newb->width0, &box);
            rctx->b.resource_copy_region(&rctx->b, newb, 0, 0, 0, 0,
                                         &res->b.b, 0, &box);
            /* Move the new buffer storage to the old pipe_resource. */
            r600_replace_buffer_storage(&rctx->b, &res->b.b, newb);
            pipe_resource_reference(&newb, NULL);

            assert(res->b.b.bind & PIPE_BIND_SHARED);
            assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
        }

        /* Buffers */
        slice_size = 0;
    }

    r600_texture_get_info(screen, resource, &stride, &offset);

    if (res->b.is_shared) {
        /* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
         * doesn't set it.
         */
        res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
        if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
            res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
    } else {
        res->b.is_shared = true;
        res->external_usage = usage;
    }

    whandle->stride = stride;
    whandle->offset = offset + slice_size * whandle->layer;

    return rscreen->ws->buffer_get_handle(rscreen->ws, res->buf, whandle);
}

/* Release all storage owned by a texture: flushed-depth copy, immediate
 * buffer, separate CMASK buffer, and the backing BO itself.
 */
void r600_texture_destroy(struct pipe_screen *screen, struct pipe_resource *ptex)
{
    struct r600_texture *rtex = (struct r600_texture*)ptex;
    struct r600_resource *resource =
&rtex->resource;580581r600_texture_reference(&rtex->flushed_depth_texture, NULL);582pipe_resource_reference((struct pipe_resource**)&resource->immed_buffer, NULL);583584if (rtex->cmask_buffer != &rtex->resource) {585r600_resource_reference(&rtex->cmask_buffer, NULL);586}587pb_reference(&resource->buf, NULL);588FREE(rtex);589}590591/* The number of samples can be specified independently of the texture. */592void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,593struct r600_texture *rtex,594unsigned nr_samples,595struct r600_fmask_info *out)596{597/* FMASK is allocated like an ordinary texture. */598struct pipe_resource templ = rtex->resource.b.b;599struct radeon_surf fmask = {};600unsigned flags, bpe;601602memset(out, 0, sizeof(*out));603604templ.nr_samples = 1;605flags = rtex->surface.flags | RADEON_SURF_FMASK;606607/* Use the same parameters and tile mode. */608fmask.u.legacy.bankw = rtex->surface.u.legacy.bankw;609fmask.u.legacy.bankh = rtex->surface.u.legacy.bankh;610fmask.u.legacy.mtilea = rtex->surface.u.legacy.mtilea;611fmask.u.legacy.tile_split = rtex->surface.u.legacy.tile_split;612613if (nr_samples <= 4)614fmask.u.legacy.bankh = 4;615616switch (nr_samples) {617case 2:618case 4:619bpe = 1;620break;621case 8:622bpe = 4;623break;624default:625R600_ERR("Invalid sample count for FMASK allocation.\n");626return;627}628629/* Overallocate FMASK on R600-R700 to fix colorbuffer corruption.630* This can be fixed by writing a separate FMASK allocator specifically631* for R600-R700 asics. 
*/632if (rscreen->chip_class <= R700) {633bpe *= 2;634}635636if (rscreen->ws->surface_init(rscreen->ws, &templ,637flags, bpe, RADEON_SURF_MODE_2D, &fmask)) {638R600_ERR("Got error in surface_init while allocating FMASK.\n");639return;640}641642assert(fmask.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);643644out->slice_tile_max = (fmask.u.legacy.level[0].nblk_x * fmask.u.legacy.level[0].nblk_y) / 64;645if (out->slice_tile_max)646out->slice_tile_max -= 1;647648out->tile_mode_index = fmask.u.legacy.tiling_index[0];649out->pitch_in_pixels = fmask.u.legacy.level[0].nblk_x;650out->bank_height = fmask.u.legacy.bankh;651out->tile_swizzle = fmask.tile_swizzle;652out->alignment = MAX2(256, 1 << fmask.surf_alignment_log2);653out->size = fmask.surf_size;654}655656static void r600_texture_allocate_fmask(struct r600_common_screen *rscreen,657struct r600_texture *rtex)658{659r600_texture_get_fmask_info(rscreen, rtex,660rtex->resource.b.b.nr_samples, &rtex->fmask);661662rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);663rtex->size = rtex->fmask.offset + rtex->fmask.size;664}665666void r600_texture_get_cmask_info(struct r600_common_screen *rscreen,667struct r600_texture *rtex,668struct r600_cmask_info *out)669{670unsigned cmask_tile_width = 8;671unsigned cmask_tile_height = 8;672unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;673unsigned element_bits = 4;674unsigned cmask_cache_bits = 1024;675unsigned num_pipes = rscreen->info.num_tile_pipes;676unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;677678unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;679unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;680unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile);681unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile);682unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width;683684unsigned pitch_elements = 
align(rtex->resource.b.b.width0, macro_tile_width);685unsigned height = align(rtex->resource.b.b.height0, macro_tile_height);686687unsigned base_align = num_pipes * pipe_interleave_bytes;688unsigned slice_bytes =689((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements;690691assert(macro_tile_width % 128 == 0);692assert(macro_tile_height % 128 == 0);693694out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1;695out->alignment = MAX2(256, base_align);696out->size = util_num_layers(&rtex->resource.b.b, 0) *697align(slice_bytes, base_align);698}699700static void r600_texture_allocate_cmask(struct r600_common_screen *rscreen,701struct r600_texture *rtex)702{703r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);704705rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment);706rtex->size = rtex->cmask.offset + rtex->cmask.size;707708rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);709}710711static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,712struct r600_texture *rtex)713{714if (rtex->cmask_buffer)715return;716717assert(rtex->cmask.size == 0);718719r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);720721rtex->cmask_buffer = (struct r600_resource *)722r600_aligned_buffer_create(&rscreen->b,723R600_RESOURCE_FLAG_UNMAPPABLE,724PIPE_USAGE_DEFAULT,725rtex->cmask.size,726rtex->cmask.alignment);727if (rtex->cmask_buffer == NULL) {728rtex->cmask.size = 0;729return;730}731732/* update colorbuffer state bits */733rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8;734735rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);736737p_atomic_inc(&rscreen->compressed_colortex_counter);738}739740void eg_resource_alloc_immed(struct r600_common_screen *rscreen,741struct r600_resource *res,742unsigned immed_size)743{744res->immed_buffer = (struct r600_resource *)745pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,746PIPE_USAGE_DEFAULT, immed_size);747}748749static void 
r600_texture_get_htile_size(struct r600_common_screen *rscreen,
                            struct r600_texture *rtex)
{
    /* Compute the HTILE (Hi-Z) metadata size for a depth texture; leaves
     * surface.meta_size at 0 when HTILE is unsupported or undesirable. */
    unsigned cl_width, cl_height, width, height;
    unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
    unsigned num_pipes = rscreen->info.num_tile_pipes;

    rtex->surface.meta_size = 0;

    /* Older kernels lack the required support on pre-Cayman parts. */
    if (rscreen->chip_class <= EVERGREEN &&
        rscreen->info.drm_minor < 26)
        return;

    /* HW bug on R6xx. */
    if (rscreen->chip_class == R600 &&
        (rtex->resource.b.b.width0 > 7680 ||
         rtex->resource.b.b.height0 > 7680))
        return;

    /* Cache-line tile dimensions depend on the pipe count. */
    switch (num_pipes) {
    case 1:
        cl_width = 32;
        cl_height = 16;
        break;
    case 2:
        cl_width = 32;
        cl_height = 32;
        break;
    case 4:
        cl_width = 64;
        cl_height = 32;
        break;
    case 8:
        cl_width = 64;
        cl_height = 64;
        break;
    case 16:
        cl_width = 128;
        cl_height = 64;
        break;
    default:
        assert(0);
        return;
    }

    width = align(rtex->surface.u.legacy.level[0].nblk_x, cl_width * 8);
    height = align(rtex->surface.u.legacy.level[0].nblk_y, cl_height * 8);

    /* One 32-bit HTILE element per 8x8 pixel tile. */
    slice_elements = (width * height) / (8 * 8);
    slice_bytes = slice_elements * 4;

    pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
    base_align = num_pipes * pipe_interleave_bytes;

    rtex->surface.meta_alignment_log2 = util_logbase2(base_align);
    rtex->surface.meta_size =
        util_num_layers(&rtex->resource.b.b, 0) *
        align(slice_bytes, base_align);
}

/* Append HTILE storage at the end of the texture allocation when the
 * size computation above produced a non-zero size.
 */
static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
                                        struct r600_texture *rtex)
{
    r600_texture_get_htile_size(rscreen, rtex);

    if (!rtex->surface.meta_size)
        return;

    rtex->htile_offset = align(rtex->size, 1 << rtex->surface.meta_alignment_log2);
    rtex->size = rtex->htile_offset + rtex->surface.meta_size;
}

/* Dump the full layout of a texture (surface, FMASK, CMASK, HTILE, levels)
 * into the given log context; used by the DBG_TEX debug flag.
 */
void r600_print_texture_info(struct r600_common_screen *rscreen,
                             struct r600_texture *rtex, struct u_log_context *log)
{
    int i;

    /* Common parameters. */
    u_log_printf(log, " Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
        "blk_h=%u, array_size=%u, last_level=%u, "
        "bpe=%u, nsamples=%u, flags=0x%"PRIx64", %s\n",
        rtex->resource.b.b.width0, rtex->resource.b.b.height0,
        rtex->resource.b.b.depth0, rtex->surface.blk_w,
        rtex->surface.blk_h,
        rtex->resource.b.b.array_size, rtex->resource.b.b.last_level,
        rtex->surface.bpe, rtex->resource.b.b.nr_samples,
        rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));

    u_log_printf(log, " Layout: size=%"PRIu64", alignment=%u, bankw=%u, "
        "bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
        rtex->surface.surf_size, 1 << rtex->surface.surf_alignment_log2, rtex->surface.u.legacy.bankw,
        rtex->surface.u.legacy.bankh, rtex->surface.u.legacy.num_banks, rtex->surface.u.legacy.mtilea,
        rtex->surface.u.legacy.tile_split, rtex->surface.u.legacy.pipe_config,
        (rtex->surface.flags & RADEON_SURF_SCANOUT) != 0);

    if (rtex->fmask.size)
        u_log_printf(log, " FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
            "bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
            rtex->fmask.offset, rtex->fmask.size, rtex->fmask.alignment,
            rtex->fmask.pitch_in_pixels, rtex->fmask.bank_height,
            rtex->fmask.slice_tile_max, rtex->fmask.tile_mode_index);

    if (rtex->cmask.size)
        u_log_printf(log, " CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, "
            "slice_tile_max=%u\n",
            rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
            rtex->cmask.slice_tile_max);

    if (rtex->htile_offset)
        u_log_printf(log, " HTile: offset=%"PRIu64", size=%u "
            "alignment=%u\n",
            rtex->htile_offset, rtex->surface.meta_size,
            1 << rtex->surface.meta_alignment_log2);

    for (i = 0; i <= rtex->resource.b.b.last_level; i++)
        u_log_printf(log, " Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
            "npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
            "mode=%u, tiling_index = %u\n",
            i,
/* (continuation of the per-level u_log_printf argument list above) */
            (uint64_t)rtex->surface.u.legacy.level[i].offset_256B * 256,
            (uint64_t)rtex->surface.u.legacy.level[i].slice_size_dw * 4,
            u_minify(rtex->resource.b.b.width0, i),
            u_minify(rtex->resource.b.b.height0, i),
            u_minify(rtex->resource.b.b.depth0, i),
            rtex->surface.u.legacy.level[i].nblk_x,
            rtex->surface.u.legacy.level[i].nblk_y,
            rtex->surface.u.legacy.level[i].mode,
            rtex->surface.u.legacy.tiling_index[i]);

    if (rtex->surface.has_stencil) {
        u_log_printf(log, " StencilLayout: tilesplit=%u\n",
            rtex->surface.u.legacy.stencil_tile_split);
        for (i = 0; i <= rtex->resource.b.b.last_level; i++) {
            u_log_printf(log, " StencilLevel[%i]: offset=%"PRIu64", "
                "slice_size=%"PRIu64", npix_x=%u, "
                "npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
                "mode=%u, tiling_index = %u\n",
                i, (uint64_t)rtex->surface.u.legacy.zs.stencil_level[i].offset_256B * 256,
                (uint64_t)rtex->surface.u.legacy.zs.stencil_level[i].slice_size_dw * 4,
                u_minify(rtex->resource.b.b.width0, i),
                u_minify(rtex->resource.b.b.height0, i),
                u_minify(rtex->resource.b.b.depth0, i),
                rtex->surface.u.legacy.zs.stencil_level[i].nblk_x,
                rtex->surface.u.legacy.zs.stencil_level[i].nblk_y,
                rtex->surface.u.legacy.zs.stencil_level[i].mode,
                rtex->surface.u.legacy.zs.stencil_tiling_index[i]);
        }
    }
}

/* Common processing for r600_texture_create and r600_texture_from_handle */
static struct r600_texture *
r600_texture_create_object(struct pipe_screen *screen,
                           const struct pipe_resource *base,
                           struct pb_buffer *buf,
                           struct radeon_surf *surface)
{
    struct r600_texture *rtex;
    struct r600_resource *resource;
    struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

    rtex = CALLOC_STRUCT(r600_texture);
    if (!rtex)
        return NULL;

    resource = &rtex->resource;
    resource->b.b = *base;
    pipe_reference_init(&resource->b.b.reference, 1);
    resource->b.b.screen = screen;

    /* don't include stencil-only formats which we don't support for rendering */
    rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));

    rtex->surface = *surface;
    rtex->size = rtex->surface.surf_size;
    rtex->db_render_format = base->format;

    /* Tiled depth textures utilize the non-displayable tile order.
     * This must be done after r600_setup_surface.
     * Applies to R600-Cayman. */
    rtex->non_disp_tiling = rtex->is_depth && rtex->surface.u.legacy.level[0].mode >= RADEON_SURF_MODE_1D;
    /* Applies to GCN. */
    rtex->last_msaa_resolve_target_micro_mode = rtex->surface.micro_tile_mode;

    if (rtex->is_depth) {
        if (base->flags & (R600_RESOURCE_FLAG_TRANSFER |
                           R600_RESOURCE_FLAG_FLUSHED_DEPTH) ||
            rscreen->chip_class >= EVERGREEN) {
            rtex->can_sample_z = !rtex->surface.u.legacy.depth_adjusted;
            rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted;
        } else {
            /* Pre-Evergreen: only single-sample Z16/Z32F are sampleable. */
            if (rtex->resource.b.b.nr_samples <= 1 &&
                (rtex->resource.b.b.format == PIPE_FORMAT_Z16_UNORM ||
                 rtex->resource.b.b.format == PIPE_FORMAT_Z32_FLOAT))
                rtex->can_sample_z = true;
        }

        if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
                             R600_RESOURCE_FLAG_FLUSHED_DEPTH))) {
            rtex->db_compatible = true;

            if (!(rscreen->debug_flags & DBG_NO_HYPERZ))
                r600_texture_allocate_htile(rscreen, rtex);
        }
    } else {
        if (base->nr_samples > 1) {
            /* Color MSAA needs both FMASK and CMASK; fail creation
             * when either could not be computed. */
            if (!buf) {
                r600_texture_allocate_fmask(rscreen, rtex);
                r600_texture_allocate_cmask(rscreen, rtex);
                rtex->cmask_buffer = &rtex->resource;
            }
            if (!rtex->fmask.size || !rtex->cmask.size) {
                FREE(rtex);
                return NULL;
            }
        }
    }

    /* Now create the backing buffer. */
    if (!buf) {
        r600_init_resource_fields(rscreen, resource, rtex->size,
                                  1 << rtex->surface.surf_alignment_log2);

        if (!r600_alloc_resource(rscreen, resource)) {
            FREE(rtex);
            return NULL;
        }
    } else {
        /* Imported BO: adopt it and fill in the bookkeeping fields. */
        resource->buf = buf;
        resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->buf);
        resource->bo_size = buf->size;
        resource->bo_alignment = 1 << buf->alignment_log2;
        resource->domains = rscreen->ws->buffer_get_initial_domain(resource->buf);
        if (resource->domains & RADEON_DOMAIN_VRAM)
            resource->vram_usage = buf->size;
        else if (resource->domains & RADEON_DOMAIN_GTT)
            resource->gart_usage = buf->size;
    }

    if (rtex->cmask.size) {
        /* Initialize the cmask to 0xCC (= compressed state). */
        r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
                                 rtex->cmask.offset, rtex->cmask.size,
                                 0xCCCCCCCC);
    }
    if (rtex->htile_offset) {
        uint32_t clear_value = 0;

        r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
                                 rtex->htile_offset,
                                 rtex->surface.meta_size,
                                 clear_value);
    }

    /* Initialize the CMASK base register value. */
    rtex->cmask.base_address_reg =
        (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

    if (rscreen->debug_flags & DBG_VM) {
        fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
            rtex->resource.gpu_address,
            rtex->resource.gpu_address + rtex->resource.buf->size,
            base->width0, base->height0, util_num_layers(base, 0), base->last_level+1,
            base->nr_samples ?
base->nr_samples : 1, util_format_short_name(base->format));1012}10131014if (rscreen->debug_flags & DBG_TEX) {1015puts("Texture:");1016struct u_log_context log;1017u_log_context_init(&log);1018r600_print_texture_info(rscreen, rtex, &log);1019u_log_new_page_print(&log, stdout);1020fflush(stdout);1021u_log_context_destroy(&log);1022}10231024return rtex;1025}10261027static enum radeon_surf_mode1028r600_choose_tiling(struct r600_common_screen *rscreen,1029const struct pipe_resource *templ)1030{1031const struct util_format_description *desc = util_format_description(templ->format);1032bool force_tiling = templ->flags & R600_RESOURCE_FLAG_FORCE_TILING;1033bool is_depth_stencil = util_format_is_depth_or_stencil(templ->format) &&1034!(templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);10351036/* MSAA resources must be 2D tiled. */1037if (templ->nr_samples > 1)1038return RADEON_SURF_MODE_2D;10391040/* Transfer resources should be linear. */1041if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)1042return RADEON_SURF_MODE_LINEAR_ALIGNED;10431044/* r600g: force tiling on TEXTURE_2D and TEXTURE_3D compute resources. */1045if (rscreen->chip_class >= R600 && rscreen->chip_class <= CAYMAN &&1046(templ->bind & PIPE_BIND_COMPUTE_RESOURCE) &&1047(templ->target == PIPE_TEXTURE_2D ||1048templ->target == PIPE_TEXTURE_3D))1049force_tiling = true;10501051/* Handle common candidates for the linear mode.1052* Compressed textures and DB surfaces must always be tiled.1053*/1054if (!force_tiling &&1055!is_depth_stencil &&1056!util_format_is_compressed(templ->format)) {1057if (rscreen->debug_flags & DBG_NO_TILING)1058return RADEON_SURF_MODE_LINEAR_ALIGNED;10591060/* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. 
*/1061if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)1062return RADEON_SURF_MODE_LINEAR_ALIGNED;10631064if (templ->bind & PIPE_BIND_LINEAR)1065return RADEON_SURF_MODE_LINEAR_ALIGNED;10661067/* 1D textures should be linear - fixes image operations on 1d */1068if (templ->target == PIPE_TEXTURE_1D ||1069templ->target == PIPE_TEXTURE_1D_ARRAY)1070return RADEON_SURF_MODE_LINEAR_ALIGNED;10711072/* Textures likely to be mapped often. */1073if (templ->usage == PIPE_USAGE_STAGING ||1074templ->usage == PIPE_USAGE_STREAM)1075return RADEON_SURF_MODE_LINEAR_ALIGNED;1076}10771078/* Make small textures 1D tiled. */1079if (templ->width0 <= 16 || templ->height0 <= 16 ||1080(rscreen->debug_flags & DBG_NO_2D_TILING))1081return RADEON_SURF_MODE_1D;10821083/* The allocator will switch to 1D if needed. */1084return RADEON_SURF_MODE_2D;1085}10861087struct pipe_resource *r600_texture_create(struct pipe_screen *screen,1088const struct pipe_resource *templ)1089{1090struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;1091struct radeon_surf surface = {0};1092bool is_flushed_depth = templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH;1093int r;10941095r = r600_init_surface(rscreen, &surface, templ,1096r600_choose_tiling(rscreen, templ), 0, 0,1097false, false, is_flushed_depth);1098if (r) {1099return NULL;1100}11011102return (struct pipe_resource *)1103r600_texture_create_object(screen, templ, NULL, &surface);1104}11051106static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,1107const struct pipe_resource *templ,1108struct winsys_handle *whandle,1109unsigned usage)1110{1111struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;1112struct pb_buffer *buf = NULL;1113enum radeon_surf_mode array_mode;1114struct radeon_surf surface = {};1115int r;1116struct radeon_bo_metadata metadata = {};1117struct r600_texture *rtex;1118bool is_scanout;11191120/* Support only 2D textures without mipmaps */1121if ((templ->target != PIPE_TEXTURE_2D && 
templ->target != PIPE_TEXTURE_RECT) ||1122templ->depth0 != 1 || templ->last_level != 0)1123return NULL;11241125buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle,1126rscreen->info.max_alignment);1127if (!buf)1128return NULL;11291130rscreen->ws->buffer_get_metadata(rscreen->ws, buf, &metadata, NULL);1131r600_surface_import_metadata(rscreen, &surface, &metadata,1132&array_mode, &is_scanout);11331134r = r600_init_surface(rscreen, &surface, templ, array_mode,1135whandle->stride, whandle->offset,1136true, is_scanout, false);1137if (r) {1138return NULL;1139}11401141rtex = r600_texture_create_object(screen, templ, buf, &surface);1142if (!rtex)1143return NULL;11441145rtex->resource.b.is_shared = true;1146rtex->resource.external_usage = usage;11471148assert(rtex->surface.tile_swizzle == 0);1149return &rtex->resource.b.b;1150}11511152bool r600_init_flushed_depth_texture(struct pipe_context *ctx,1153struct pipe_resource *texture,1154struct r600_texture **staging)1155{1156struct r600_texture *rtex = (struct r600_texture*)texture;1157struct pipe_resource resource;1158struct r600_texture **flushed_depth_texture = staging ?1159staging : &rtex->flushed_depth_texture;1160enum pipe_format pipe_format = texture->format;11611162if (!staging) {1163if (rtex->flushed_depth_texture)1164return true; /* it's ready */11651166if (!rtex->can_sample_z && rtex->can_sample_s) {1167switch (pipe_format) {1168case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:1169/* Save memory by not allocating the S plane. 
/* Create (or reuse) a "flushed depth" texture: a color-tiled copy of a depth
 * buffer that the texture units can sample from on hardware that cannot
 * sample the DB layout directly.
 *
 * If \p staging is NULL the result is cached in rtex->flushed_depth_texture;
 * otherwise a transfer-staging texture is created and returned via *staging.
 * Returns false on allocation failure.
 */
bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     struct r600_texture **staging)
{
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct pipe_resource resource;
	struct r600_texture **flushed_depth_texture = staging ?
			staging : &rtex->flushed_depth_texture;
	enum pipe_format pipe_format = texture->format;

	if (!staging) {
		if (rtex->flushed_depth_texture)
			return true; /* it's ready */

		/* Pick a narrower format when only one of Z/S can be
		 * sampled, to avoid allocating/copying the unused plane. */
		if (!rtex->can_sample_z && rtex->can_sample_s) {
			switch (pipe_format) {
			case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
				/* Save memory by not allocating the S plane. */
				pipe_format = PIPE_FORMAT_Z32_FLOAT;
				break;
			case PIPE_FORMAT_Z24_UNORM_S8_UINT:
			case PIPE_FORMAT_S8_UINT_Z24_UNORM:
				/* Save memory bandwidth by not copying the
				 * stencil part during flush.
				 *
				 * This potentially increases memory bandwidth
				 * if an application uses both Z and S texturing
				 * simultaneously (a flushed Z24S8 texture
				 * would be stored compactly), but how often
				 * does that really happen?
				 */
				pipe_format = PIPE_FORMAT_Z24X8_UNORM;
				break;
			default:;
			}
		} else if (!rtex->can_sample_s && rtex->can_sample_z) {
			assert(util_format_has_stencil(util_format_description(pipe_format)));

			/* DB->CB copies to an 8bpp surface don't work. */
			pipe_format = PIPE_FORMAT_X24S8_UINT;
		}
	}

	/* Clone the source texture's shape with the chosen format; strip the
	 * depth/stencil bind flag so it is laid out as a color surface. */
	memset(&resource, 0, sizeof(resource));
	resource.target = texture->target;
	resource.format = pipe_format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = texture->depth0;
	resource.array_size = texture->array_size;
	resource.last_level = texture->last_level;
	resource.nr_samples = texture->nr_samples;
	resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
	resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;

	if (staging)
		resource.flags |= R600_RESOURCE_FLAG_TRANSFER;

	*flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (*flushed_depth_texture == NULL) {
		R600_ERR("failed to create temporary texture to hold flushed depth\n");
		return false;
	}

	(*flushed_depth_texture)->non_disp_tiling = false;
	return true;
}

/**
 * Initialize the pipe_resource descriptor to be of the same size as the box,
 * which is supposed to hold a subregion of the texture "orig" at the given
 * mipmap level.
 */
static void r600_init_temp_resource_from_box(struct pipe_resource *res,
					     struct pipe_resource *orig,
					     const struct pipe_box *box,
					     unsigned level, unsigned flags)
{
	memset(res, 0, sizeof(*res));
	res->format = orig->format;
	res->width0 = box->width;
	res->height0 = box->height;
	res->depth0 = 1;
	res->array_size = 1;
	res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	res->flags = flags;

	/* We must set the correct texture target and dimensions for a 3D box. */
	if (box->depth > 1 && util_max_layer(orig, level) > 0) {
		res->target = PIPE_TEXTURE_2D_ARRAY;
		res->array_size = box->depth;
	} else {
		res->target = PIPE_TEXTURE_2D;
	}
}
/* Whether a write transfer covering \p box may simply reallocate the
 * texture's storage instead of synchronizing with the GPU. */
static bool r600_can_invalidate_texture(struct r600_common_screen *rscreen,
					struct r600_texture *rtex,
					unsigned transfer_usage,
					const struct pipe_box *box)
{
	/* r600g doesn't react to dirty_tex_descriptor_counter */
	return rscreen->chip_class >= GFX6 &&
		!rtex->resource.b.is_shared &&
		!(transfer_usage & PIPE_MAP_READ) &&
		rtex->resource.b.b.last_level == 0 &&
		util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
						 box->x, box->y, box->z,
						 box->width, box->height,
						 box->depth);
}

/* Discard the texture's current storage and allocate a fresh buffer in the
 * same pipe_resource, so a pending write need not wait for the GPU. */
static void r600_texture_invalidate_storage(struct r600_common_context *rctx,
					    struct r600_texture *rtex)
{
	struct r600_common_screen *rscreen = rctx->screen;

	/* There is no point in discarding depth and tiled buffers. */
	assert(!rtex->is_depth);
	assert(rtex->surface.is_linear);

	/* Reallocate the buffer in the same pipe_resource. */
	r600_alloc_resource(rscreen, &rtex->resource);

	/* Initialize the CMASK base address (needed even without CMASK). */
	rtex->cmask.base_address_reg =
		(rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

	p_atomic_inc(&rscreen->dirty_tex_counter);

	rctx->num_alloc_tex_transfer_bytes += rtex->size;
}

/* pipe_context::texture_map implementation.
 *
 * Decides between mapping the resource directly and going through a linear
 * staging texture (always used for depth and tiled surfaces), creates the
 * transfer object, performs any needed decompress/copy for reads, and
 * returns a CPU pointer into the mapped buffer.  Returns NULL on failure.
 */
void *r600_texture_transfer_map(struct pipe_context *ctx,
				struct pipe_resource *texture,
				unsigned level,
				unsigned usage,
				const struct pipe_box *box,
				struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct r600_transfer *trans;
	struct r600_resource *buf;
	unsigned offset = 0;
	char *map;
	bool use_staging_texture = false;

	assert(!(texture->flags & R600_RESOURCE_FLAG_TRANSFER));
	assert(box->width && box->height && box->depth);

	/* Depth textures use staging unconditionally. */
	if (!rtex->is_depth) {
		/* Degrade the tile mode if we get too many transfers on APUs.
		 * On dGPUs, the staging texture is always faster.
		 * Only count uploads that are at least 4x4 pixels large.
		 */
		if (!rctx->screen->info.has_dedicated_vram &&
		    level == 0 &&
		    box->width >= 4 && box->height >= 4 &&
		    p_atomic_inc_return(&rtex->num_level0_transfers) == 10) {
			bool can_invalidate =
				r600_can_invalidate_texture(rctx->screen, rtex,
							    usage, box);

			r600_reallocate_texture_inplace(rctx, rtex,
							PIPE_BIND_LINEAR,
							can_invalidate);
		}

		/* Tiled textures need to be converted into a linear texture for CPU
		 * access. The staging texture is always linear and is placed in GART.
		 *
		 * Reading from VRAM or GTT WC is slow, always use the staging
		 * texture in this case.
		 *
		 * Use the staging texture for uploads if the underlying BO
		 * is busy.
		 */
		if (!rtex->surface.is_linear)
			use_staging_texture = true;
		else if (usage & PIPE_MAP_READ)
			use_staging_texture =
				rtex->resource.domains & RADEON_DOMAIN_VRAM ||
				rtex->resource.flags & RADEON_FLAG_GTT_WC;
		/* Write & linear only: */
		else if (r600_rings_is_buffer_referenced(rctx, rtex->resource.buf,
							 RADEON_USAGE_READWRITE) ||
			 !rctx->ws->buffer_wait(rctx->ws, rtex->resource.buf, 0,
						RADEON_USAGE_READWRITE)) {
			/* It's busy. */
			if (r600_can_invalidate_texture(rctx->screen, rtex,
							usage, box))
				r600_texture_invalidate_storage(rctx, rtex);
			else
				use_staging_texture = true;
		}
	}

	trans = CALLOC_STRUCT(r600_transfer);
	if (!trans)
		return NULL;
	pipe_resource_reference(&trans->b.b.resource, texture);
	trans->b.b.level = level;
	trans->b.b.usage = usage;
	trans->b.b.box = *box;

	if (rtex->is_depth) {
		struct r600_texture *staging_depth;

		if (rtex->resource.b.b.nr_samples > 1) {
			/* MSAA depth buffers need to be converted to single sample buffers.
			 *
			 * Mapping MSAA depth buffers can occur if ReadPixels is called
			 * with a multisample GLX visual.
			 *
			 * First downsample the depth buffer to a temporary texture,
			 * then decompress the temporary one to staging.
			 *
			 * Only the region being mapped is transfered.
			 */
			struct pipe_resource resource;

			r600_init_temp_resource_from_box(&resource, texture, box, level, 0);

			if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			if (usage & PIPE_MAP_READ) {
				struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
				if (!temp) {
					R600_ERR("failed to create a temporary depth texture\n");
					FREE(trans);
					return NULL;
				}

				r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
				rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
							    0, 0, 0, box->depth, 0, 0);
				pipe_resource_reference(&temp, NULL);
			}

			/* Just get the strides. */
			r600_texture_get_offset(rctx->screen, staging_depth, level, NULL,
						&trans->b.b.stride,
						&trans->b.b.layer_stride);
		} else {
			/* XXX: only readback the rectangle which is being mapped? */
			/* XXX: when discard is true, no need to read back from depth texture */
			if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			rctx->blit_decompress_depth(ctx, rtex, staging_depth,
						    level, level,
						    box->z, box->z + box->depth - 1,
						    0, 0);

			offset = r600_texture_get_offset(rctx->screen, staging_depth,
							 level, box,
							 &trans->b.b.stride,
							 &trans->b.b.layer_stride);
		}

		trans->staging = (struct r600_resource*)staging_depth;
		buf = trans->staging;
	} else if (use_staging_texture) {
		struct pipe_resource resource;
		struct r600_texture *staging;

		r600_init_temp_resource_from_box(&resource, texture, box, level,
						 R600_RESOURCE_FLAG_TRANSFER);
		resource.usage = (usage & PIPE_MAP_READ) ?
			PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;

		/* Create the temporary texture. */
		staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
		if (!staging) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			FREE(trans);
			return NULL;
		}
		trans->staging = &staging->resource;

		/* Just get the strides. */
		r600_texture_get_offset(rctx->screen, staging, 0, NULL,
					&trans->b.b.stride,
					&trans->b.b.layer_stride);

		if (usage & PIPE_MAP_READ)
			r600_copy_to_staging_texture(ctx, trans);
		else
			/* Fresh staging buffer: no need to sync with the GPU. */
			usage |= PIPE_MAP_UNSYNCHRONIZED;

		buf = trans->staging;
	} else {
		/* the resource is mapped directly */
		offset = r600_texture_get_offset(rctx->screen, rtex, level, box,
						 &trans->b.b.stride,
						 &trans->b.b.layer_stride);
		buf = &rtex->resource;
	}

	if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
		r600_resource_reference(&trans->staging, NULL);
		FREE(trans);
		return NULL;
	}

	*ptransfer = &trans->b.b;
	return map + offset;
}

/* pipe_context::texture_unmap implementation: write back the staging copy
 * (if any), release it, and heuristically flush the gfx IB when transfers
 * have allocated too much temporary storage. */
void r600_texture_transfer_unmap(struct pipe_context *ctx,
				 struct pipe_transfer* transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct pipe_resource *texture = transfer->resource;
	struct r600_texture *rtex = (struct r600_texture*)texture;

	if ((transfer->usage & PIPE_MAP_WRITE) && rtransfer->staging) {
		if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
			ctx->resource_copy_region(ctx, texture, transfer->level,
						  transfer->box.x, transfer->box.y, transfer->box.z,
						  &rtransfer->staging->b.b, transfer->level,
						  &transfer->box);
		} else {
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
	}

	if (rtransfer->staging) {
		rctx->num_alloc_tex_transfer_bytes += rtransfer->staging->buf->size;
		r600_resource_reference(&rtransfer->staging, NULL);
	}

	/* Heuristic for {upload, draw, upload, draw, ..}:
	 *
	 * Flush the gfx IB if we've allocated too much texture storage.
	 *
	 * The idea is that we don't want to build IBs that use too much
	 * memory and put pressure on the kernel memory manager and we also
	 * want to make temporary and invalidated buffers go idle ASAP to
	 * decrease the total memory usage or make them reusable. The memory
	 * usage will be slightly higher than given here because of the buffer
	 * cache in the winsys.
	 *
	 * The result is that the kernel memory manager is never a bottleneck.
	 */
	if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
		rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
		rctx->num_alloc_tex_transfer_bytes = 0;
	}

	pipe_resource_reference(&transfer->resource, NULL);
	FREE(transfer);
}
them reusable. The memory1503* usage will be slightly higher than given here because of the buffer1504* cache in the winsys.1505*1506* The result is that the kernel memory manager is never a bottleneck.1507*/1508if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {1509rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);1510rctx->num_alloc_tex_transfer_bytes = 0;1511}15121513pipe_resource_reference(&transfer->resource, NULL);1514FREE(transfer);1515}15161517struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,1518struct pipe_resource *texture,1519const struct pipe_surface *templ,1520unsigned width0, unsigned height0,1521unsigned width, unsigned height)1522{1523struct r600_surface *surface = CALLOC_STRUCT(r600_surface);15241525if (!surface)1526return NULL;15271528assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));1529assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));15301531pipe_reference_init(&surface->base.reference, 1);1532pipe_resource_reference(&surface->base.texture, texture);1533surface->base.context = pipe;1534surface->base.format = templ->format;1535surface->base.width = width;1536surface->base.height = height;1537surface->base.u = templ->u;15381539surface->width0 = width0;1540surface->height0 = height0;15411542return &surface->base;1543}15441545static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,1546struct pipe_resource *tex,1547const struct pipe_surface *templ)1548{1549unsigned level = templ->u.tex.level;1550unsigned width = u_minify(tex->width0, level);1551unsigned height = u_minify(tex->height0, level);1552unsigned width0 = tex->width0;1553unsigned height0 = tex->height0;15541555if (tex->target != PIPE_BUFFER && templ->format != tex->format) {1556const struct util_format_description *tex_desc1557= util_format_description(tex->format);1558const struct util_format_description *templ_desc1559= 
util_format_description(templ->format);15601561assert(tex_desc->block.bits == templ_desc->block.bits);15621563/* Adjust size of surface if and only if the block width or1564* height is changed. */1565if (tex_desc->block.width != templ_desc->block.width ||1566tex_desc->block.height != templ_desc->block.height) {1567unsigned nblks_x = util_format_get_nblocksx(tex->format, width);1568unsigned nblks_y = util_format_get_nblocksy(tex->format, height);15691570width = nblks_x * templ_desc->block.width;1571height = nblks_y * templ_desc->block.height;15721573width0 = util_format_get_nblocksx(tex->format, width0);1574height0 = util_format_get_nblocksy(tex->format, height0);1575}1576}15771578return r600_create_surface_custom(pipe, tex, templ,1579width0, height0,1580width, height);1581}15821583static void r600_surface_destroy(struct pipe_context *pipe,1584struct pipe_surface *surface)1585{1586struct r600_surface *surf = (struct r600_surface*)surface;1587r600_resource_reference(&surf->cb_buffer_fmask, NULL);1588r600_resource_reference(&surf->cb_buffer_cmask, NULL);1589pipe_resource_reference(&surface->texture, NULL);1590FREE(surface);1591}15921593static void r600_clear_texture(struct pipe_context *pipe,1594struct pipe_resource *tex,1595unsigned level,1596const struct pipe_box *box,1597const void *data)1598{1599struct pipe_screen *screen = pipe->screen;1600struct r600_texture *rtex = (struct r600_texture*)tex;1601struct pipe_surface tmpl = {{0}};1602struct pipe_surface *sf;16031604tmpl.format = tex->format;1605tmpl.u.tex.first_layer = box->z;1606tmpl.u.tex.last_layer = box->z + box->depth - 1;1607tmpl.u.tex.level = level;1608sf = pipe->create_surface(pipe, tex, &tmpl);1609if (!sf)1610return;16111612if (rtex->is_depth) {1613unsigned clear;1614float depth;1615uint8_t stencil = 0;16161617/* Depth is always present. 
/* pipe_context::clear_texture: clear a sub-box of one level by creating a
 * temporary surface view and using the depth/stencil or render-target clear
 * paths, with a software fallback for non-renderable color formats. */
static void r600_clear_texture(struct pipe_context *pipe,
			       struct pipe_resource *tex,
			       unsigned level,
			       const struct pipe_box *box,
			       const void *data)
{
	struct pipe_screen *screen = pipe->screen;
	struct r600_texture *rtex = (struct r600_texture*)tex;
	struct pipe_surface tmpl = {{0}};
	struct pipe_surface *sf;

	tmpl.format = tex->format;
	tmpl.u.tex.first_layer = box->z;
	tmpl.u.tex.last_layer = box->z + box->depth - 1;
	tmpl.u.tex.level = level;
	sf = pipe->create_surface(pipe, tex, &tmpl);
	if (!sf)
		return;

	if (rtex->is_depth) {
		unsigned clear;
		float depth;
		uint8_t stencil = 0;

		/* Depth is always present. */
		clear = PIPE_CLEAR_DEPTH;
		util_format_unpack_z_float(tex->format, &depth, data, 1);

		if (rtex->surface.has_stencil) {
			clear |= PIPE_CLEAR_STENCIL;
			util_format_unpack_s_8uint(tex->format, &stencil, data, 1);
		}

		pipe->clear_depth_stencil(pipe, sf, clear, depth, stencil,
					  box->x, box->y,
					  box->width, box->height, false);
	} else {
		union pipe_color_union color;

		util_format_unpack_rgba(tex->format, color.ui, data, 1);

		if (screen->is_format_supported(screen, tex->format,
						tex->target, 0, 0,
						PIPE_BIND_RENDER_TARGET)) {
			pipe->clear_render_target(pipe, sf, &color,
						  box->x, box->y,
						  box->width, box->height, false);
		} else {
			/* Software fallback - just for R9G9B9E5_FLOAT */
			util_clear_render_target(pipe, sf, &color,
						 box->x, box->y,
						 box->width, box->height);
		}
	}
	pipe_surface_reference(&sf, NULL);
}

/* Map a pipe format's channel swizzle to the hardware COLOR_*_SWAP value.
 * Returns ~0U for layouts the CB cannot represent. */
unsigned r600_translate_colorswap(enum pipe_format format, bool do_endian_swap)
{
	const struct util_format_description *desc = util_format_description(format);

#define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == PIPE_SWIZZLE_##swz)

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_0280A0_SWAP_STD;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return ~0U;

	switch (desc->nr_channels) {
	case 1:
		if (HAS_SWIZZLE(0,X))
			return V_0280A0_SWAP_STD; /* X___ */
		else if (HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* ___X */
		break;
	case 2:
		if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
		    (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
		    (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
			return V_0280A0_SWAP_STD; /* XY__ */
		else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
			 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
			 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
			/* YX__ */
			return (do_endian_swap ? V_0280A0_SWAP_STD : V_0280A0_SWAP_STD_REV);
		else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
			return V_0280A0_SWAP_ALT; /* X__Y */
		else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* Y__X */
		break;
	case 3:
		if (HAS_SWIZZLE(0,X))
			return (do_endian_swap ? V_0280A0_SWAP_STD_REV : V_0280A0_SWAP_STD);
		else if (HAS_SWIZZLE(0,Z))
			return V_0280A0_SWAP_STD_REV; /* ZYX */
		break;
	case 4:
		/* check the middle channels, the 1st and 4th channel can be NONE */
		if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z)) {
			return V_0280A0_SWAP_STD; /* XYZW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y)) {
			return V_0280A0_SWAP_STD_REV; /* WZYX */
		} else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X)) {
			return V_0280A0_SWAP_ALT; /* ZYXW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,W)) {
			/* YZWX */
			if (desc->is_array)
				return V_0280A0_SWAP_ALT_REV;
			else
				return (do_endian_swap ? V_0280A0_SWAP_ALT : V_0280A0_SWAP_ALT_REV);
		}
		break;
	}
	return ~0U;
}

/* FAST COLOR CLEAR */

/* Pack the API clear color into the two CB clear-color registers stored in
 * rtex->color_clear_value. */
static void evergreen_set_clear_color(struct r600_texture *rtex,
				      enum pipe_format surface_format,
				      const union pipe_color_union *color)
{
	union util_color uc;

	memset(&uc, 0, sizeof(uc));

	if (rtex->surface.bpe == 16) {
		/* DCC fast clear only:
		 *   CLEAR_WORD0 = R = G = B
		 *   CLEAR_WORD1 = A
		 */
		assert(color->ui[0] == color->ui[1] &&
		       color->ui[0] == color->ui[2]);
		uc.ui[0] = color->ui[0];
		uc.ui[1] = color->ui[3];
	} else {
		util_pack_color_union(surface_format, &uc, color);
	}

	memcpy(rtex->color_clear_value, &uc, 2 * sizeof(uint32_t));
}
/* Attempt a CMASK-based fast clear for every framebuffer colorbuffer that
 * qualifies.  For each cleared buffer, the corresponding PIPE_CLEAR_COLOR
 * bit is removed from *buffers and the cbuf is flagged dirty; buffers that
 * don't qualify are left for the normal clear path. */
void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
				   struct pipe_framebuffer_state *fb,
				   struct r600_atom *fb_state,
				   unsigned *buffers, ubyte *dirty_cbufs,
				   const union pipe_color_union *color)
{
	int i;

	/* This function is broken in BE, so just disable this path for now */
#if UTIL_ARCH_BIG_ENDIAN
	return;
#endif

	/* Fast clear is unconditional; it can't honor conditional rendering. */
	if (rctx->render_cond)
		return;

	for (i = 0; i < fb->nr_cbufs; i++) {
		struct r600_texture *tex;
		unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;

		if (!fb->cbufs[i])
			continue;

		/* if this colorbuffer is not being cleared */
		if (!(*buffers & clear_bit))
			continue;

		tex = (struct r600_texture *)fb->cbufs[i]->texture;

		/* the clear is allowed if all layers are bound */
		if (fb->cbufs[i]->u.tex.first_layer != 0 ||
		    fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) {
			continue;
		}

		/* cannot clear mipmapped textures */
		if (fb->cbufs[i]->texture->last_level != 0) {
			continue;
		}

		/* only supported on tiled surfaces */
		if (tex->surface.is_linear) {
			continue;
		}

		/* shared textures can't use fast clear without an explicit flush,
		 * because there is no way to communicate the clear color among
		 * all clients
		 */
		if (tex->resource.b.is_shared &&
		    !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			continue;

		/* Use a slow clear for small surfaces where the cost of
		 * the eliminate pass can be higher than the benefit of fast
		 * clear. AMDGPU-pro does this, but the numbers may differ.
		 *
		 * This helps on both dGPUs and APUs, even small ones.
		 */
		if (tex->resource.b.b.nr_samples <= 1 &&
		    tex->resource.b.b.width0 * tex->resource.b.b.height0 <= 300 * 300)
			continue;

		{
			/* 128-bit formats are unsupported */
			if (tex->surface.bpe > 8) {
				continue;
			}

			/* ensure CMASK is enabled */
			r600_texture_alloc_cmask_separate(rctx->screen, tex);
			if (tex->cmask.size == 0) {
				continue;
			}

			/* Do the fast clear. */
			rctx->clear_buffer(&rctx->b, &tex->cmask_buffer->b.b,
					   tex->cmask.offset, tex->cmask.size, 0,
					   R600_COHERENCY_CB_META);

			bool need_compressed_update = !tex->dirty_level_mask;

			tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;

			if (need_compressed_update)
				p_atomic_inc(&rctx->screen->compressed_colortex_counter);
		}

		evergreen_set_clear_color(tex, fb->cbufs[i]->format, color);

		if (dirty_cbufs)
			*dirty_cbufs |= 1 << i;
		rctx->set_atom_dirty(rctx, fb_state, true);
		*buffers &= ~clear_bit;
	}
}

/* pipe_screen::memobj_create_from_handle: wrap a shared winsys buffer in a
 * pipe_memory_object for later texture import (r600_texture_from_memobj).
 * Takes ownership of the buffer reference returned by buffer_from_handle. */
static struct pipe_memory_object *
r600_memobj_from_handle(struct pipe_screen *screen,
			struct winsys_handle *whandle,
			bool dedicated)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_memory_object *memobj = CALLOC_STRUCT(r600_memory_object);
	struct pb_buffer *buf = NULL;

	if (!memobj)
		return NULL;

	buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle,
					      rscreen->info.max_alignment);
	if (!buf) {
		free(memobj);
		return NULL;
	}

	memobj->b.dedicated = dedicated;
	memobj->buf = buf;
	memobj->stride = whandle->stride;
	memobj->offset = whandle->offset;

	return (struct pipe_memory_object *)memobj;

}

/* pipe_screen::memobj_destroy: release the wrapped buffer reference and
 * free the memory object. */
static void
r600_memobj_destroy(struct pipe_screen *screen,
		    struct pipe_memory_object *_memobj)
{
	struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;

	pb_reference(&memobj->buf, NULL);
	free(memobj);
}
/* pipe_screen::resource_from_memobj: create a texture view of an imported
 * memory object at the given byte offset.  Dedicated allocations recover
 * their tiling from BO metadata; non-dedicated imports fall back to linear
 * (see comment below). */
static struct pipe_resource *
r600_texture_from_memobj(struct pipe_screen *screen,
			 const struct pipe_resource *templ,
			 struct pipe_memory_object *_memobj,
			 uint64_t offset)
{
	int r;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;
	struct r600_texture *rtex;
	struct radeon_surf surface = {};
	struct radeon_bo_metadata metadata = {};
	enum radeon_surf_mode array_mode;
	bool is_scanout;
	struct pb_buffer *buf = NULL;

	if (memobj->b.dedicated) {
		rscreen->ws->buffer_get_metadata(rscreen->ws, memobj->buf, &metadata, NULL);
		r600_surface_import_metadata(rscreen, &surface, &metadata,
					     &array_mode, &is_scanout);
	} else {
		/**
		 * The bo metadata is unset for un-dedicated images. So we fall
		 * back to linear. See answer to question 5 of the
		 * VK_KHX_external_memory spec for some details.
		 *
		 * It is possible that this case isn't going to work if the
		 * surface pitch isn't correctly aligned by default.
		 *
		 * In order to support it correctly we require multi-image
		 * metadata to be syncrhonized between radv and radeonsi. The
		 * semantics of associating multiple image metadata to a memory
		 * object on the vulkan export side are not concretely defined
		 * either.
		 *
		 * All the use cases we are aware of at the moment for memory
		 * objects use dedicated allocations. So lets keep the initial
		 * implementation simple.
		 *
		 * A possible alternative is to attempt to reconstruct the
		 * tiling information when the TexParameter TEXTURE_TILING_EXT
		 * is set.
		 */
		array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
		is_scanout = false;

	}

	r = r600_init_surface(rscreen, &surface, templ,
			      array_mode, memobj->stride,
			      offset, true, is_scanout,
			      false);
	if (r)
		return NULL;

	rtex = r600_texture_create_object(screen, templ, memobj->buf, &surface);
	if (!rtex)
		return NULL;

	/* r600_texture_create_object doesn't increment refcount of
	 * memobj->buf, so increment it here.
	 */
	pb_reference(&buf, memobj->buf);

	rtex->resource.b.is_shared = true;
	rtex->resource.external_usage = PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE;

	return &rtex->resource.b.b;
}

/* Install the texture-related pipe_screen entry points. */
void r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.resource_from_handle = r600_texture_from_handle;
	rscreen->b.resource_get_handle = r600_texture_get_handle;
	rscreen->b.resource_get_info = r600_texture_get_info;
	rscreen->b.resource_from_memobj = r600_texture_from_memobj;
	rscreen->b.memobj_create_from_handle = r600_memobj_from_handle;
	rscreen->b.memobj_destroy = r600_memobj_destroy;
}

/* Install the texture-related pipe_context entry points. */
void r600_init_context_texture_functions(struct r600_common_context *rctx)
{
	rctx->b.create_surface = r600_create_surface;
	rctx->b.surface_destroy = r600_surface_destroy;
	rctx->b.clear_texture = r600_clear_texture;
}