Path: blob/21.2-virgl/src/gallium/drivers/r600/r600_buffer_common.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák
 */

#include "r600_cs.h"
#include "evergreen_compute.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include <inttypes.h>
#include <stdio.h>

bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
				     struct pb_buffer *buf,
				     enum radeon_bo_usage usage)
{
	if (ctx->ws->cs_is_buffer_referenced(&ctx->gfx.cs, buf, usage)) {
		return true;
	}
	if (radeon_emitted(&ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(&ctx->dma.cs, buf, usage)) {
		return true;
	}
	return false;
}

void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
				      struct r600_resource *resource,
				      unsigned usage)
{
	enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
	bool busy = false;

	assert(!(resource->flags & RADEON_FLAG_SPARSE));

	if (usage & PIPE_MAP_UNSYNCHRONIZED) {
		return ctx->ws->buffer_map(ctx->ws, resource->buf, NULL, usage);
	}

	if (!(usage & PIPE_MAP_WRITE)) {
		/* have to wait for the last write */
		rusage = RADEON_USAGE_WRITE;
	}

	if (radeon_emitted(&ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
	    ctx->ws->cs_is_buffer_referenced(&ctx->gfx.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_MAP_DONTBLOCK) {
			ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->gfx.flush(ctx, 0, NULL);
			busy = true;
		}
	}
	if (radeon_emitted(&ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(&ctx->dma.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_MAP_DONTBLOCK) {
			ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->dma.flush(ctx, 0, NULL);
			busy = true;
		}
	}

	if (busy || !ctx->ws->buffer_wait(ctx->ws, resource->buf, 0, rusage)) {
		if (usage & PIPE_MAP_DONTBLOCK) {
			return NULL;
		} else {
			/* We will be waiting for the GPU. Wait for any offloaded
			 * CS flush to complete to avoid busy-waiting in the winsys. */
			ctx->ws->cs_sync_flush(&ctx->gfx.cs);
			if (ctx->dma.cs.priv)
				ctx->ws->cs_sync_flush(&ctx->dma.cs);
		}
	}

	/* Setting the CS to NULL will prevent doing checks we have done already. */
	return ctx->ws->buffer_map(ctx->ws, resource->buf, NULL, usage);
}

void r600_init_resource_fields(struct r600_common_screen *rscreen,
			       struct r600_resource *res,
			       uint64_t size, unsigned alignment)
{
	struct r600_texture *rtex = (struct r600_texture*)res;

	res->bo_size = size;
	res->bo_alignment = alignment;
	res->flags = 0;
	res->texture_handle_allocated = false;
	res->image_handle_allocated = false;

	switch (res->b.b.usage) {
	case PIPE_USAGE_STREAM:
		res->flags = RADEON_FLAG_GTT_WC;
		FALLTHROUGH;
	case PIPE_USAGE_STAGING:
		/* Transfers are likely to occur more often with these
		 * resources. */
		res->domains = RADEON_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DYNAMIC:
		/* Older kernels didn't always flush the HDP cache before
		 * CS execution
		 */
		if (rscreen->info.drm_minor < 40) {
			res->domains = RADEON_DOMAIN_GTT;
			res->flags |= RADEON_FLAG_GTT_WC;
			break;
		}
		FALLTHROUGH;
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_IMMUTABLE:
	default:
		/* Not listing GTT here improves performance in some
		 * apps. */
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_GTT_WC;
		break;
	}

	if (res->b.b.target == PIPE_BUFFER &&
	    res->b.b.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
			      PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
		/* Use GTT for all persistent mappings with older
		 * kernels, because they didn't always flush the HDP
		 * cache before CS execution.
		 *
		 * Write-combined CPU mappings are fine, the kernel
		 * ensures all CPU writes finish before the GPU
		 * executes a command stream.
		 */
		if (rscreen->info.drm_minor < 40)
			res->domains = RADEON_DOMAIN_GTT;
	}

	/* Tiled textures are unmappable. Always put them in VRAM. */
	if ((res->b.b.target != PIPE_BUFFER && !rtex->surface.is_linear) ||
	    res->flags & R600_RESOURCE_FLAG_UNMAPPABLE) {
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_NO_CPU_ACCESS |
			      RADEON_FLAG_GTT_WC;
	}

	/* Displayable and shareable surfaces are not suballocated. */
	if (res->b.b.bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
		res->flags |= RADEON_FLAG_NO_SUBALLOC; /* shareable */
	else
		res->flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;

	if (rscreen->debug_flags & DBG_NO_WC)
		res->flags &= ~RADEON_FLAG_GTT_WC;

	/* Set expected VRAM and GART usage for the buffer. */
	res->vram_usage = 0;
	res->gart_usage = 0;

	if (res->domains & RADEON_DOMAIN_VRAM)
		res->vram_usage = size;
	else if (res->domains & RADEON_DOMAIN_GTT)
		res->gart_usage = size;
}

bool r600_alloc_resource(struct r600_common_screen *rscreen,
			 struct r600_resource *res)
{
	struct pb_buffer *old_buf, *new_buf;

	/* Allocate a new resource. */
	new_buf = rscreen->ws->buffer_create(rscreen->ws, res->bo_size,
					     res->bo_alignment,
					     res->domains, res->flags);
	if (!new_buf) {
		return false;
	}

	/* Replace the pointer such that if res->buf wasn't NULL, it won't be
	 * NULL. This should prevent crashes with multiple contexts using
	 * the same buffer where one of the contexts invalidates it while
	 * the others are using it. */
	old_buf = res->buf;
	res->buf = new_buf; /* should be atomic */

	if (rscreen->info.r600_has_virtual_memory)
		res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->buf);
	else
		res->gpu_address = 0;

	pb_reference(&old_buf, NULL);

	util_range_set_empty(&res->valid_buffer_range);

	/* Print debug information. */
	if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
			res->gpu_address, res->gpu_address + res->buf->size,
			res->buf->size);
	}
	return true;
}

void r600_buffer_destroy(struct pipe_screen *screen, struct pipe_resource *buf)
{
	struct r600_resource *rbuffer = r600_resource(buf);

	threaded_resource_deinit(buf);
	util_range_destroy(&rbuffer->valid_buffer_range);
	pipe_resource_reference((struct pipe_resource**)&rbuffer->immed_buffer, NULL);
	pb_reference(&rbuffer->buf, NULL);
	FREE(rbuffer);
}

static bool
r600_invalidate_buffer(struct r600_common_context *rctx,
		       struct r600_resource *rbuffer)
{
	/* Shared buffers can't be reallocated. */
	if (rbuffer->b.is_shared)
		return false;

	/* Sparse buffers can't be reallocated. */
	if (rbuffer->flags & RADEON_FLAG_SPARSE)
		return false;

	/* In AMD_pinned_memory, the user pointer association only gets
	 * broken when the buffer is explicitly re-allocated.
	 */
	if (rbuffer->b.is_user_ptr)
		return false;

	/* Check if mapping this buffer would cause waiting for the GPU. */
	if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(rctx->ws, rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
		rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
	} else {
		util_range_set_empty(&rbuffer->valid_buffer_range);
	}

	return true;
}

/* Replace the storage of dst with src. */
void r600_replace_buffer_storage(struct pipe_context *ctx,
				 struct pipe_resource *dst,
				 struct pipe_resource *src)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_resource *rdst = r600_resource(dst);
	struct r600_resource *rsrc = r600_resource(src);
	uint64_t old_gpu_address = rdst->gpu_address;

	pb_reference(&rdst->buf, rsrc->buf);
	rdst->gpu_address = rsrc->gpu_address;
	rdst->b.b.bind = rsrc->b.b.bind;
	rdst->flags = rsrc->flags;

	assert(rdst->vram_usage == rsrc->vram_usage);
	assert(rdst->gart_usage == rsrc->gart_usage);
	assert(rdst->bo_size == rsrc->bo_size);
	assert(rdst->bo_alignment == rsrc->bo_alignment);
	assert(rdst->domains == rsrc->domains);

	rctx->rebind_buffer(ctx, dst, old_gpu_address);
}

void r600_invalidate_resource(struct pipe_context *ctx,
			      struct pipe_resource *resource)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(resource);

	/* We currently only do anything here for buffers. */
	if (resource->target == PIPE_BUFFER)
		(void)r600_invalidate_buffer(rctx, rbuffer);
}

static void *r600_buffer_get_transfer(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer,
				      void *data, struct r600_resource *staging,
				      unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *transfer;

	if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
		transfer = slab_alloc(&rctx->pool_transfers_unsync);
	else
		transfer = slab_alloc(&rctx->pool_transfers);

	transfer->b.b.resource = NULL;
	pipe_resource_reference(&transfer->b.b.resource, resource);
	transfer->b.b.level = 0;
	transfer->b.b.usage = usage;
	transfer->b.b.box = *box;
	transfer->b.b.stride = 0;
	transfer->b.b.layer_stride = 0;
	transfer->b.staging = NULL;
	transfer->b.b.offset = offset;
	transfer->staging = staging;
	*ptransfer = &transfer->b.b;
	return data;
}

static bool r600_can_dma_copy_buffer(struct r600_common_context *rctx,
				     unsigned dstx, unsigned srcx, unsigned size)
{
	bool dword_aligned = !(dstx % 4) && !(srcx % 4) && !(size % 4);

	return rctx->screen->has_cp_dma ||
	       (dword_aligned && (rctx->dma.cs.priv ||
				  rctx->screen->has_streamout));
}

void *r600_buffer_transfer_map(struct pipe_context *ctx,
			       struct pipe_resource *resource,
			       unsigned level,
			       unsigned usage,
			       const struct pipe_box *box,
			       struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
	struct r600_resource *rbuffer = r600_resource(resource);
	uint8_t *data;

	if (r600_resource(resource)->compute_global_bo) {
		return r600_compute_global_transfer_map(ctx, resource, level, usage, box, ptransfer);
	}

	assert(box->x + box->width <= resource->width0);

	/* From GL_AMD_pinned_memory issues:
	 *
	 *     4) Is glMapBuffer on a shared buffer guaranteed to return the
	 *        same system address which was specified at creation time?
	 *
	 *        RESOLVED: NO. The GL implementation might return a different
	 *        virtual mapping of that memory, although the same physical
	 *        page will be used.
	 *
	 * So don't ever use staging buffers.
	 */
	if (rbuffer->b.is_user_ptr)
		usage |= PIPE_MAP_PERSISTENT;

	/* See if the buffer range being mapped has never been initialized,
	 * in which case it can be mapped unsynchronized. */
	if (!(usage & (PIPE_MAP_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
	    usage & PIPE_MAP_WRITE &&
	    !rbuffer->b.is_shared &&
	    !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
		usage |= PIPE_MAP_UNSYNCHRONIZED;
	}

	/* If discarding the entire range, discard the whole resource instead. */
	if (usage & PIPE_MAP_DISCARD_RANGE &&
	    box->x == 0 && box->width == resource->width0) {
		usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
	}

	if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE &&
	    !(usage & (PIPE_MAP_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_NO_INVALIDATE))) {
		assert(usage & PIPE_MAP_WRITE);

		if (r600_invalidate_buffer(rctx, rbuffer)) {
			/* At this point, the buffer is always idle. */
			usage |= PIPE_MAP_UNSYNCHRONIZED;
		} else {
			/* Fall back to a temporary buffer. */
			usage |= PIPE_MAP_DISCARD_RANGE;
		}
	}

	if ((usage & PIPE_MAP_DISCARD_RANGE) &&
	    !(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
	    ((!(usage & (PIPE_MAP_UNSYNCHRONIZED |
			 PIPE_MAP_PERSISTENT)) &&
	      r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) ||
	     (rbuffer->flags & RADEON_FLAG_SPARSE))) {
		assert(usage & PIPE_MAP_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (rbuffer->flags & RADEON_FLAG_SPARSE ||
		    r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(rctx->ws, rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			unsigned offset;
			struct r600_resource *staging = NULL;

			u_upload_alloc(ctx->stream_uploader, 0,
				       box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
				       rctx->screen->info.tcc_cache_line_size,
				       &offset, (struct pipe_resource**)&staging,
				       (void**)&data);

			if (staging) {
				data += box->x % R600_MAP_BUFFER_ALIGNMENT;
				return r600_buffer_get_transfer(ctx, resource, usage, box,
								ptransfer, data, staging, offset);
			} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
				return NULL;
			}
		} else {
			/* At this point, the buffer is always idle (we checked it above). */
			usage |= PIPE_MAP_UNSYNCHRONIZED;
		}
	}
	/* Use a staging buffer in cached GTT for reads. */
	else if (((usage & PIPE_MAP_READ) &&
		  !(usage & PIPE_MAP_PERSISTENT) &&
		  (rbuffer->domains & RADEON_DOMAIN_VRAM ||
		   rbuffer->flags & RADEON_FLAG_GTT_WC) &&
		  r600_can_dma_copy_buffer(rctx, 0, box->x, box->width)) ||
		 (rbuffer->flags & RADEON_FLAG_SPARSE)) {
		struct r600_resource *staging;

		assert(!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC));
		staging = (struct r600_resource*) pipe_buffer_create(
				ctx->screen, 0, PIPE_USAGE_STAGING,
				box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT));
		if (staging) {
			/* Copy the VRAM buffer to the staging buffer. */
			rctx->dma_copy(ctx, &staging->b.b, 0,
				       box->x % R600_MAP_BUFFER_ALIGNMENT,
				       0, 0, resource, 0, box);

			data = r600_buffer_map_sync_with_rings(rctx, staging,
							       usage & ~PIPE_MAP_UNSYNCHRONIZED);
			if (!data) {
				r600_resource_reference(&staging, NULL);
				return NULL;
			}
			data += box->x % R600_MAP_BUFFER_ALIGNMENT;

			return r600_buffer_get_transfer(ctx, resource, usage, box,
							ptransfer, data, staging, 0);
		} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
			return NULL;
		}
	}

	data = r600_buffer_map_sync_with_rings(rctx, rbuffer, usage);
	if (!data) {
		return NULL;
	}
	data += box->x;

	return r600_buffer_get_transfer(ctx, resource, usage, box,
					ptransfer, data, NULL, 0);
}

static void r600_buffer_do_flush_region(struct pipe_context *ctx,
					struct pipe_transfer *transfer,
					const struct pipe_box *box)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_resource *rbuffer = r600_resource(transfer->resource);

	if (rtransfer->staging) {
		struct pipe_resource *dst, *src;
		unsigned soffset;
		struct pipe_box dma_box;

		dst = transfer->resource;
		src = &rtransfer->staging->b.b;
		soffset = rtransfer->b.b.offset + box->x % R600_MAP_BUFFER_ALIGNMENT;

		u_box_1d(soffset, box->width, &dma_box);

		/* Copy the staging buffer into the original one. */
		ctx->resource_copy_region(ctx, dst, 0, box->x, 0, 0, src, 0, &dma_box);
	}

	util_range_add(&rbuffer->b.b, &rbuffer->valid_buffer_range, box->x,
		       box->x + box->width);
}

void r600_buffer_flush_region(struct pipe_context *ctx,
			      struct pipe_transfer *transfer,
			      const struct pipe_box *rel_box)
{
	unsigned required_usage = PIPE_MAP_WRITE |
				  PIPE_MAP_FLUSH_EXPLICIT;

	if (r600_resource(transfer->resource)->compute_global_bo)
		return;

	if ((transfer->usage & required_usage) == required_usage) {
		struct pipe_box box;

		u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
		r600_buffer_do_flush_region(ctx, transfer, &box);
	}
}

void r600_buffer_transfer_unmap(struct pipe_context *ctx,
				struct pipe_transfer *transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;

	if (r600_resource(transfer->resource)->compute_global_bo) {
		r600_compute_global_transfer_unmap(ctx, transfer);
		return;
	}

	if (transfer->usage & PIPE_MAP_WRITE &&
	    !(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT))
		r600_buffer_do_flush_region(ctx, transfer, &transfer->box);

	r600_resource_reference(&rtransfer->staging, NULL);
	assert(rtransfer->b.staging == NULL); /* for threaded context only */
	pipe_resource_reference(&transfer->resource, NULL);

	/* Don't use pool_transfers_unsync. We are always in the driver
	 * thread. */
	slab_free(&rctx->pool_transfers, transfer);
}

void r600_buffer_subdata(struct pipe_context *ctx,
			 struct pipe_resource *buffer,
			 unsigned usage, unsigned offset,
			 unsigned size, const void *data)
{
	struct pipe_transfer *transfer = NULL;
	struct pipe_box box;
	uint8_t *map = NULL;

	usage |= PIPE_MAP_WRITE;

	if (!(usage & PIPE_MAP_DIRECTLY))
		usage |= PIPE_MAP_DISCARD_RANGE;

	u_box_1d(offset, size, &box);
	map = r600_buffer_transfer_map(ctx, buffer, 0, usage, &box, &transfer);
	if (!map)
		return;

	memcpy(map, data, size);
	r600_buffer_transfer_unmap(ctx, transfer);
}

static struct r600_resource *
r600_alloc_buffer_struct(struct pipe_screen *screen,
			 const struct pipe_resource *templ)
{
	struct r600_resource *rbuffer;

	rbuffer = MALLOC_STRUCT(r600_resource);

	rbuffer->b.b = *templ;
	rbuffer->b.b.next = NULL;
	pipe_reference_init(&rbuffer->b.b.reference, 1);
	rbuffer->b.b.screen = screen;

	threaded_resource_init(&rbuffer->b.b);

	rbuffer->buf = NULL;
	rbuffer->bind_history = 0;
	rbuffer->immed_buffer = NULL;
	rbuffer->compute_global_bo = false;
	util_range_init(&rbuffer->valid_buffer_range);
	return rbuffer;
}

struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
					 const struct pipe_resource *templ,
					 unsigned alignment)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

	r600_init_resource_fields(rscreen, rbuffer, templ->width0, alignment);

	if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
		rbuffer->flags |= RADEON_FLAG_SPARSE;

	if (!r600_alloc_resource(rscreen, rbuffer)) {
		FREE(rbuffer);
		return NULL;
	}
	return &rbuffer->b.b;
}

struct pipe_resource *r600_aligned_buffer_create(struct pipe_screen *screen,
						 unsigned flags,
						 unsigned usage,
						 unsigned size,
						 unsigned alignment)
{
	struct pipe_resource buffer;

	memset(&buffer, 0, sizeof buffer);
	buffer.target = PIPE_BUFFER;
	buffer.format = PIPE_FORMAT_R8_UNORM;
	buffer.bind = 0;
	buffer.usage = usage;
	buffer.flags = flags;
	buffer.width0 = size;
	buffer.height0 = 1;
	buffer.depth0 = 1;
	buffer.array_size = 1;
	return r600_buffer_create(screen, &buffer, alignment);
}

struct pipe_resource *
r600_buffer_from_user_memory(struct pipe_screen *screen,
			     const struct pipe_resource *templ,
			     void *user_memory)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_winsys *ws = rscreen->ws;
	struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

	rbuffer->domains = RADEON_DOMAIN_GTT;
	rbuffer->flags = 0;
	rbuffer->b.is_user_ptr = true;
	util_range_add(&rbuffer->b.b, &rbuffer->valid_buffer_range, 0, templ->width0);
	util_range_add(&rbuffer->b.b, &rbuffer->b.valid_buffer_range, 0, templ->width0);

	/* Convert a user pointer to a buffer. */
	rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
	if (!rbuffer->buf) {
		FREE(rbuffer);
		return NULL;
	}

	if (rscreen->info.r600_has_virtual_memory)
		rbuffer->gpu_address =
			ws->buffer_get_virtual_address(rbuffer->buf);
	else
		rbuffer->gpu_address = 0;

	rbuffer->vram_usage = 0;
	rbuffer->gart_usage = templ->width0;

	return &rbuffer->b.b;
}
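
For context only (not part of r600_buffer_common.c): a minimal sketch of how a Gallium frontend typically reaches the helpers above, assuming the usual dispatch where screen->resource_create() lands in r600_buffer_create() for PIPE_BUFFER targets and pipe->buffer_subdata() is wired to r600_buffer_subdata(). The function name example_constant_buffer_upload, the bind/usage flags, and the 64 KiB size are arbitrary illustration choices.

	/* Illustrative sketch -- not part of the file above. */
	#include "pipe/p_context.h"
	#include "pipe/p_screen.h"
	#include "util/u_inlines.h"

	static void example_constant_buffer_upload(struct pipe_screen *screen,
						   struct pipe_context *pipe)
	{
		static const uint32_t pattern[4] = {0, 1, 2, 3};

		/* pipe_buffer_create() fills a PIPE_BUFFER template; with the
		 * r600 driver this is expected to end up in r600_buffer_create(). */
		struct pipe_resource *buf =
			pipe_buffer_create(screen, PIPE_BIND_CONSTANT_BUFFER,
					   PIPE_USAGE_DEFAULT, 64 * 1024);
		if (!buf)
			return;

		/* buffer_subdata (r600_buffer_subdata above) adds PIPE_MAP_WRITE
		 * and PIPE_MAP_DISCARD_RANGE, so a GPU-busy buffer is invalidated
		 * or staged rather than stalled on. */
		pipe->buffer_subdata(pipe, buf, 0, 0, sizeof(pattern), pattern);

		pipe_resource_reference(&buf, NULL);
	}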