Path: blob/21.2-virgl/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
 */


#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif
#include <inttypes.h>

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"
#include "pb_bufmgr.h"


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct fenced_manager
{
   struct pb_manager base;
   struct pb_manager *provider;
   struct pb_fence_ops *ops;

   /**
    * Maximum buffer size that can be safely allocated.
    */
   pb_size max_buffer_size;

   /**
    * Maximum cpu memory we can allocate before we start waiting for the
    * GPU to idle.
    */
   pb_size max_cpu_total_size;

   /**
    * Following members are mutable and protected by this mutex.
    */
   mtx_t mutex;

   /**
    * Fenced buffer list.
    *
    * All fenced buffers are placed in this list, ordered from the oldest
    * fence to the newest fence.
    */
   struct list_head fenced;
   pb_size num_fenced;

   struct list_head unfenced;
   pb_size num_unfenced;

   /**
    * How much temporary CPU memory is being used to hold unvalidated buffers.
    */
   pb_size cpu_total_size;
};


/**
 * Fenced buffer.
 *
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   /**
    * Immutable members.
    */

   struct pb_buffer base;
   struct fenced_manager *mgr;

   /**
    * Following members are mutable and protected by fenced_manager::mutex.
    */

   struct list_head head;

   /**
    * Buffer with storage.
    */
   struct pb_buffer *buffer;
   pb_size size;
   struct pb_desc desc;

   /**
    * Temporary CPU storage data. Used when there isn't enough GPU memory to
    * store the buffer.
    */
   void *data;

   /**
    * A bitmask of PB_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   enum pb_usage_flags flags;

   unsigned mapcount;

   struct pb_validate *vl;
   unsigned validation_flags;

   struct pipe_fence_handle *fence;
};


static inline struct fenced_manager *
fenced_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct fenced_manager *)mgr;
}


static inline struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct fenced_buffer *)buf;
}


static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf);

static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait);

static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf);


/**
 * Dump the fenced buffer list.
 *
 * Useful to understand failures to allocate buffers.
 */
static void
fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
{
#ifdef DEBUG
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   debug_printf("%10s %7s %8s %7s %10s %s\n",
                "buffer", "size", "refcount", "storage", "fence", "signalled");

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while (curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %"PRIu64" %8u %7s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.size,
                   p_atomic_read(&fenced_buf->base.reference.count),
                   fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none"));
      curr = next;
      next = curr->next;
   }

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while (curr != &fenced_mgr->fenced) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(fenced_buf->buffer);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %"PRIu64" %8u %7s %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.size,
                   p_atomic_read(&fenced_buf->base.reference.count),
                   "gpu",
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }
#else
   (void)fenced_mgr;
#endif
}


static inline void
fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
                             struct fenced_buffer *fenced_buf)
{
   assert(!pipe_is_referenced(&fenced_buf->base.reference));

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   list_del(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
   fenced_buffer_destroy_cpu_storage_locked(fenced_buf);

   FREE(fenced_buf);
}


/**
 * Add the buffer to the fenced list.
 *
 * Reference count should be incremented before calling this function.
 */
static inline void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->flags & PB_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   p_atomic_inc(&fenced_buf->base.reference.count);

   list_del(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   list_addtail(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}


/**
 * Remove the buffer from the fenced list, and potentially destroy the buffer
 * if the reference count reaches zero.
 *
 * Returns TRUE if the buffer was destroyed.
 */
static inline boolean
fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->mgr == fenced_mgr);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   list_del(&fenced_buf->head);
   assert(fenced_mgr->num_fenced);
   --fenced_mgr->num_fenced;

   list_addtail(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   if (p_atomic_dec_zero(&fenced_buf->base.reference.count)) {
      fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
      return TRUE;
   }

   return FALSE;
}


/**
 * Wait for the fence to expire, and remove it from the fenced list.
 *
 * This function will release and re-acquire the mutex, so any copy of mutable
 * state must be discarded after calling it.
 */
static inline enum pipe_error
fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   enum pipe_error ret = PIPE_ERROR;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->fence);

   if (fenced_buf->fence) {
      struct pipe_fence_handle *fence = NULL;
      int finished;
      boolean proceed;

      ops->fence_reference(ops, &fence, fenced_buf->fence);

      mtx_unlock(&fenced_mgr->mutex);

      finished = ops->fence_finish(ops, fenced_buf->fence, 0);

      mtx_lock(&fenced_mgr->mutex);

      assert(pipe_is_referenced(&fenced_buf->base.reference));

      /* Only proceed if the fence object didn't change in the meanwhile.
       * Otherwise assume the work has been already carried out by another
       * thread that re-acquired the lock before us.
       */
      proceed = fence == fenced_buf->fence ? TRUE : FALSE;

      ops->fence_reference(ops, &fence, NULL);

      if (proceed && finished == 0) {
         /* Remove from the fenced list. */
         boolean destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

         /* TODO: remove subsequent buffers with the same fence? */

         assert(!destroyed);
         (void) destroyed; /* silence unused var warning for non-debug build */

         fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   return ret;
}


/**
 * Remove as many fenced buffers from the fenced list as possible.
 *
 * Returns TRUE if at least one buffer was removed.
 */
static boolean
fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
                                      boolean wait)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;
   boolean ret = FALSE;

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while (curr != &fenced_mgr->fenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if (fenced_buf->fence != prev_fence) {
         int signaled;

         if (wait) {
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);

            /* Don't return just now. Instead preemptively check if the
             * following buffers' fences already expired, without further waits.
             */
            wait = FALSE;
         } else {
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         }

         if (signaled != 0) {
            return ret;
         }

         prev_fence = fenced_buf->fence;
      } else {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

      ret = TRUE;

      curr = next;
      next = curr->next;
   }

   return ret;
}


/**
 * Try to free some GPU memory by backing it up into CPU memory.
 *
 * Returns TRUE if at least one buffer was freed.
 */
static boolean
fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
{
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while (curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      /* We can only move storage if the buffer is not mapped and not
       * validated.
       */
      if (fenced_buf->buffer &&
          !fenced_buf->mapcount &&
          !fenced_buf->vl) {
         enum pipe_error ret;

         ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
         if (ret == PIPE_OK) {
            ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
            if (ret == PIPE_OK) {
               fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
               return TRUE;
            }
            fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
         }
      }

      curr = next;
      next = curr->next;
   }

   return FALSE;
}


/**
 * Destroy CPU storage for this buffer.
 */
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if (fenced_buf->data) {
      align_free(fenced_buf->data);
      fenced_buf->data = NULL;
      assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size);
      fenced_buf->mgr->cpu_total_size -= fenced_buf->size;
   }
}


/**
 * Create CPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf)
{
   assert(!fenced_buf->data);
   if (fenced_buf->data)
      return PIPE_OK;

   if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
   if (!fenced_buf->data)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_mgr->cpu_total_size += fenced_buf->size;

   return PIPE_OK;
}


/**
 * Destroy the GPU storage.
 */
static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if (fenced_buf->buffer) {
      pb_reference(&fenced_buf->buffer, NULL);
   }
}


/**
 * Try to create GPU storage for this buffer.
 *
 * This function is a shorthand around pb_manager::create_buffer for
 * fenced_buffer_create_gpu_storage_locked()'s benefit.
 */
static inline boolean
fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                            struct fenced_buffer *fenced_buf)
{
   struct pb_manager *provider = fenced_mgr->provider;

   assert(!fenced_buf->buffer);

   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
                                                fenced_buf->size,
                                                &fenced_buf->desc);
   return fenced_buf->buffer ? TRUE : FALSE;
}


/**
 * Create GPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait)
{
   assert(!fenced_buf->buffer);

   /* Check for signaled buffers before trying to allocate. */
   fenced_manager_check_signalled_locked(fenced_mgr, FALSE);

   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);

   /* Keep trying while there is some sort of progress:
    * - fences are expiring,
    * - or buffers are being swapped out from GPU memory into CPU memory.
    */
   while (!fenced_buf->buffer &&
          (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
           fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
      fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
   }

   if (!fenced_buf->buffer && wait) {
      /* Same as before, but this time around, wait to free buffers if
       * necessary.
       */
      while (!fenced_buf->buffer &&
             (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
              fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
         fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
      }
   }

   if (!fenced_buf->buffer) {
      if (0)
         fenced_manager_dump_locked(fenced_mgr);

      /* Give up. */
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   return PIPE_OK;
}


static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
{
   uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_WRITE, NULL);
   if (!map)
      return PIPE_ERROR;

   memcpy(map, fenced_buf->data, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}


static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
{
   const uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_READ, NULL);
   if (!map)
      return PIPE_ERROR;

   memcpy(fenced_buf->data, map, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}


static void
fenced_buffer_destroy(void *winsys, struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   assert(!pipe_is_referenced(&fenced_buf->base.reference));

   mtx_lock(&fenced_mgr->mutex);

   fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);

   mtx_unlock(&fenced_mgr->mutex);
}


static void *
fenced_buffer_map(struct pb_buffer *buf,
                  enum pb_usage_flags flags, void *flush_ctx)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;
   void *map = NULL;

   mtx_lock(&fenced_mgr->mutex);

   assert(!(flags & PB_USAGE_GPU_READ_WRITE));

   /* Serialize writes. */
   while ((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
          ((fenced_buf->flags & PB_USAGE_GPU_READ) &&
           (flags & PB_USAGE_CPU_WRITE))) {

      /* Don't wait for the GPU to finish accessing it,
       * if blocking is forbidden.
       */
      if ((flags & PB_USAGE_DONTBLOCK) &&
          ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         goto done;
      }

      if (flags & PB_USAGE_UNSYNCHRONIZED) {
         break;
      }

      /* Wait for the GPU to finish accessing. This will release and re-acquire
       * the mutex, so all copies of mutable state must be discarded.
       */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
   }

   if (fenced_buf->buffer) {
      map = pb_map(fenced_buf->buffer, flags, flush_ctx);
   } else {
      assert(fenced_buf->data);
      map = fenced_buf->data;
   }

   if (map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PB_USAGE_CPU_READ_WRITE;
   }

done:
   mtx_unlock(&fenced_mgr->mutex);

   return map;
}


static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   mtx_lock(&fenced_mgr->mutex);

   assert(fenced_buf->mapcount);
   if (fenced_buf->mapcount) {
      if (fenced_buf->buffer)
         pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if (!fenced_buf->mapcount)
         fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
   }

   mtx_unlock(&fenced_mgr->mutex);
}


static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       enum pb_usage_flags flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   enum pipe_error ret;

   mtx_lock(&fenced_mgr->mutex);

   if (!vl) {
      /* Invalidate. */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   assert(flags & PB_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PB_USAGE_GPU_READ_WRITE));
   flags &= PB_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists. */
   if (fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

   if (fenced_buf->vl == vl &&
       (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated. */
      ret = PIPE_OK;
      goto done;
   }

   /* Create and update GPU storage. */
   if (!fenced_buf->buffer) {
      assert(!fenced_buf->mapcount);

      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
      if (ret != PIPE_OK) {
         goto done;
      }

      ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
      if (ret != PIPE_OK) {
         fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
         goto done;
      }

      if (fenced_buf->mapcount) {
         debug_printf("warning: validating a buffer while it is still mapped\n");
      } else {
         fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
      }
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if (ret != PIPE_OK)
      goto done;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

done:
   mtx_unlock(&fenced_mgr->mutex);

   return ret;
}


static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   mtx_lock(&fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->buffer);

   if (fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         ASSERTED boolean destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         assert(!destroyed);
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   mtx_unlock(&fenced_mgr->mutex);
}


static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   mtx_lock(&fenced_mgr->mutex);

   /* This should only be called when the buffer is validated. Typically
    * when processing relocations.
    */
   assert(fenced_buf->vl);
   assert(fenced_buf->buffer);

   if (fenced_buf->buffer) {
      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
   } else {
      *base_buf = buf;
      *offset = 0;
   }

   mtx_unlock(&fenced_mgr->mutex);
}


static const struct pb_vtbl
fenced_buffer_vtbl = {
   fenced_buffer_destroy,
   fenced_buffer_map,
   fenced_buffer_unmap,
   fenced_buffer_validate,
   fenced_buffer_fence,
   fenced_buffer_get_base_buffer
};


/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   /* Don't stall the GPU, waste time evicting buffers, or waste memory
    * trying to create a buffer that will most likely never fit into the
    * graphics aperture.
    */
   if (size > fenced_mgr->max_buffer_size) {
      goto no_buffer;
   }

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if (!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.reference, 1);
   fenced_buf->base.alignment_log2 = util_logbase2(desc->alignment);
   fenced_buf->base.usage = desc->usage;
   fenced_buf->base.size = size;
   fenced_buf->size = size;
   fenced_buf->desc = *desc;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   mtx_lock(&fenced_mgr->mutex);

   /* Try to create GPU storage without stalling. */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);

   /* Attempt to use CPU memory to avoid stalling the GPU. */
   if (ret != PIPE_OK) {
      ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
   }

   /* Create GPU storage, waiting for some to be available. */
   if (ret != PIPE_OK) {
      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
   }

   /* Give up. */
   if (ret != PIPE_OK) {
      goto no_storage;
   }

   assert(fenced_buf->buffer || fenced_buf->data);

   list_addtail(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   mtx_unlock(&fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   mtx_unlock(&fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}


static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   mtx_lock(&fenced_mgr->mutex);
   while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
      ;
   mtx_unlock(&fenced_mgr->mutex);

   assert(fenced_mgr->provider->flush);
   if (fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
}


static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   mtx_lock(&fenced_mgr->mutex);

   /* Wait on outstanding fences. */
   while (fenced_mgr->num_fenced) {
      mtx_unlock(&fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      mtx_lock(&fenced_mgr->mutex);
      while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         ;
   }

#ifdef DEBUG
   /* assert(!fenced_mgr->num_unfenced); */
#endif

   mtx_unlock(&fenced_mgr->mutex);
   mtx_destroy(&fenced_mgr->mutex);

   if (fenced_mgr->provider)
      fenced_mgr->provider->destroy(fenced_mgr->provider);

   fenced_mgr->ops->destroy(fenced_mgr->ops);

   FREE(fenced_mgr);
}


struct pb_manager *
fenced_bufmgr_create(struct pb_manager *provider,
                     struct pb_fence_ops *ops,
                     pb_size max_buffer_size,
                     pb_size max_cpu_total_size)
{
   struct fenced_manager *fenced_mgr;

   if (!provider)
      return NULL;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if (!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;
   fenced_mgr->max_buffer_size = max_buffer_size;
   fenced_mgr->max_cpu_total_size = max_cpu_total_size;

   list_inithead(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   list_inithead(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   (void) mtx_init(&fenced_mgr->mutex, mtx_plain);

   return &fenced_mgr->base;
}
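For context, here is a minimal sketch of how a winsys would typically layer this manager on top of its raw buffer manager. Only fenced_bufmgr_create() and its four parameters come from the file above; the helper names (my_winsys_create_provider, my_winsys_create_fence_ops) and the size limits are hypothetical placeholders, not part of this source.

/* Hypothetical wiring example -- not part of pb_buffer_fenced.c.
 * "provider" is the raw pb_manager that actually allocates GPU memory, and
 * "ops" supplies the fence callbacks this file relies on (fence_reference,
 * fence_signalled, fence_finish, destroy).
 */
struct pb_manager *
my_winsys_create_bufmgr(struct my_winsys *ws)
{
   struct pb_manager *provider = my_winsys_create_provider(ws);   /* assumed helper */
   struct pb_fence_ops *ops = my_winsys_create_fence_ops(ws);     /* assumed helper */

   /* Reject buffers above 16 MiB outright, and allow up to 1 MiB of temporary
    * CPU storage before allocations start waiting on the GPU (illustrative
    * limits only).
    */
   return fenced_bufmgr_create(provider, ops,
                               16 * 1024 * 1024, /* max_buffer_size */
                               1 * 1024 * 1024); /* max_cpu_total_size */
}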