Path: blob/21.2-virgl/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
/**************************************************************************
 *
 * Copyright 2006-2008 VMware, Inc., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/


/**
 * @file
 * S-lab pool implementation.
 *
 * @sa http://en.wikipedia.org/wiki/Slab_allocation
 *
 * @author Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 * @author Jose Fonseca <[email protected]>
 */

#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "pipe/p_defines.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/list.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


struct pb_slab;


/**
 * Buffer in a slab.
 *
 * Sub-allocation of a contiguous buffer.
 */
struct pb_slab_buffer
{
   struct pb_buffer base;

   struct pb_slab *slab;

   struct list_head head;

   unsigned mapCount;

   /** Offset relative to the start of the slab buffer. */
   pb_size start;
};


/**
 * Slab -- a contiguous piece of memory.
 */
struct pb_slab
{
   struct list_head head;
   struct list_head freeBuffers;
   pb_size numBuffers;
   pb_size numFree;

   struct pb_slab_buffer *buffers;
   struct pb_slab_manager *mgr;

   /** Buffer from the provider */
   struct pb_buffer *bo;

   void *virtual;
};
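/*
 * Layout sketch (illustrative numbers, not taken from any particular
 * driver): with a slab size of 64 KiB and a buffer size of 4 KiB, a
 * slab carries 64 KiB / 4 KiB = 16 pb_slab_buffers, whose start
 * offsets into the provider buffer are 0x0000, 0x1000, ..., 0xF000.
 */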
/**
 * It adds/removes slabs as needed in order to meet the allocation/destruction
 * of individual buffers.
 */
struct pb_slab_manager
{
   struct pb_manager base;

   /** From where we get our buffers */
   struct pb_manager *provider;

   /** Size of the buffers we hand on downstream */
   pb_size bufSize;

   /** Size of the buffers we request upstream */
   pb_size slabSize;

   /**
    * Alignment, usage to be used to allocate the slab buffers.
    *
    * We can only provide buffers which are consistent (in alignment, usage)
    * with this description.
    */
   struct pb_desc desc;

   /**
    * Partial slabs
    *
    * Full slabs are not stored in any list. Empty slabs are destroyed
    * immediately.
    */
   struct list_head slabs;

   mtx_t mutex;
};


/**
 * Wrapper around several slabs, therefore capable of handling buffers of
 * multiple sizes.
 *
 * This buffer manager just dispatches buffer allocations to the appropriate
 * slab manager, according to the requested buffer size, or bypasses the slab
 * managers altogether for even greater sizes.
 *
 * The data of this structure remains constant after
 * initialization and thus needs no mutex protection.
 */
struct pb_slab_range_manager
{
   struct pb_manager base;

   struct pb_manager *provider;

   pb_size minBufSize;
   pb_size maxBufSize;

   /** @sa pb_slab_manager::desc */
   struct pb_desc desc;

   unsigned numBuckets;
   pb_size *bucketSizes;

   /** Array of pb_slab_manager, one for each bucket size */
   struct pb_manager **buckets;
};


static inline struct pb_slab_buffer *
pb_slab_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_slab_buffer *)buf;
}


static inline struct pb_slab_manager *
pb_slab_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_manager *)mgr;
}


static inline struct pb_slab_range_manager *
pb_slab_range_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_range_manager *)mgr;
}


/**
 * Delete a buffer from the slab delayed list and put
 * it on the slab free list.
 */
static void
pb_slab_buffer_destroy(void *winsys, struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   struct pb_slab *slab = buf->slab;
   struct pb_slab_manager *mgr = slab->mgr;
   struct list_head *list = &buf->head;

   mtx_lock(&mgr->mutex);

   assert(!pipe_is_referenced(&buf->base.reference));

   buf->mapCount = 0;

   list_del(list);
   list_addtail(list, &slab->freeBuffers);
   slab->numFree++;

   if (slab->head.next == &slab->head)
      list_addtail(&slab->head, &mgr->slabs);

   /* If the slab becomes totally empty, free it */
   if (slab->numFree == slab->numBuffers) {
      list = &slab->head;
      list_delinit(list);
      pb_unmap(slab->bo);
      pb_reference(&slab->bo, NULL);
      FREE(slab->buffers);
      FREE(slab);
   }

   mtx_unlock(&mgr->mutex);
}


static void *
pb_slab_buffer_map(struct pb_buffer *_buf,
                   enum pb_usage_flags flags,
                   void *flush_ctx)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   /* XXX: it will be necessary to remap here to propagate flush_ctx */

   ++buf->mapCount;
   return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
}


static void
pb_slab_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   --buf->mapCount;
}


static enum pipe_error
pb_slab_buffer_validate(struct pb_buffer *_buf,
                        struct pb_validate *vl,
                        enum pb_usage_flags flags)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   return pb_validate(buf->slab->bo, vl, flags);
}


static void
pb_slab_buffer_fence(struct pb_buffer *_buf,
                     struct pipe_fence_handle *fence)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_fence(buf->slab->bo, fence);
}


static void
pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
                               struct pb_buffer **base_buf,
                               pb_size *offset)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_get_base_buffer(buf->slab->bo, base_buf, offset);
   *offset += buf->start;
}
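/*
 * Worked example for the offset composition above (hypothetical values):
 * if the provider reports the slab's buffer at offset 0x10000 within its
 * base allocation and buf->start is 0x2000, the sub-buffer resolves to
 * the base buffer at offset 0x12000.
 */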
static const struct pb_vtbl
pb_slab_buffer_vtbl = {
   pb_slab_buffer_destroy,
   pb_slab_buffer_map,
   pb_slab_buffer_unmap,
   pb_slab_buffer_validate,
   pb_slab_buffer_fence,
   pb_slab_buffer_get_base_buffer
};


/**
 * Create a new slab.
 *
 * Called when we run out of free slabs.
 */
static enum pipe_error
pb_slab_create(struct pb_slab_manager *mgr)
{
   struct pb_slab *slab;
   struct pb_slab_buffer *buf;
   unsigned numBuffers;
   unsigned i;
   enum pipe_error ret;

   slab = CALLOC_STRUCT(pb_slab);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
   if (!slab->bo) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err0;
   }

   /* Note down the slab virtual address. All mappings are accessed directly
    * through this address so it is required that the buffer is mapped
    * persistently. */
   slab->virtual = pb_map(slab->bo,
                          PB_USAGE_CPU_READ |
                          PB_USAGE_CPU_WRITE |
                          PB_USAGE_PERSISTENT, NULL);
   if (!slab->virtual) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   numBuffers = slab->bo->size / mgr->bufSize;

   slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
   if (!slab->buffers) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   list_inithead(&slab->head);
   list_inithead(&slab->freeBuffers);
   slab->numBuffers = numBuffers;
   slab->numFree = 0;
   slab->mgr = mgr;

   buf = slab->buffers;
   for (i = 0; i < numBuffers; ++i) {
      pipe_reference_init(&buf->base.reference, 0);
      buf->base.size = mgr->bufSize;
      buf->base.alignment_log2 = 0;
      buf->base.usage = 0;
      buf->base.vtbl = &pb_slab_buffer_vtbl;
      buf->slab = slab;
      buf->start = i * mgr->bufSize;
      buf->mapCount = 0;
      list_addtail(&buf->head, &slab->freeBuffers);
      slab->numFree++;
      buf++;
   }

   /* Add this slab to the list of partial slabs */
   list_addtail(&slab->head, &mgr->slabs);

   return PIPE_OK;

out_err1:
   pb_reference(&slab->bo, NULL);
out_err0:
   FREE(slab);
   return ret;
}
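/*
 * Summary of the list invariants maintained by pb_slab_create,
 * pb_slab_manager_create_buffer and pb_slab_buffer_destroy: a slab sits
 * on mgr->slabs while it has at least one free buffer, leaves the list
 * when its last free buffer is handed out, rejoins it when a buffer is
 * returned, and is unmapped and freed once numFree == numBuffers again.
 */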
static struct pb_buffer *
pb_slab_manager_create_buffer(struct pb_manager *_mgr,
                              pb_size size,
                              const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
   struct pb_slab_buffer *buf;
   struct pb_slab *slab;
   struct list_head *list;

   /* check size */
   assert(size <= mgr->bufSize);
   if (size > mgr->bufSize)
      return NULL;

   /* check if we can provide the requested alignment */
   assert(pb_check_alignment(desc->alignment, mgr->desc.alignment));
   if (!pb_check_alignment(desc->alignment, mgr->desc.alignment))
      return NULL;
   assert(pb_check_alignment(desc->alignment, mgr->bufSize));
   if (!pb_check_alignment(desc->alignment, mgr->bufSize))
      return NULL;

   assert(pb_check_usage(desc->usage, mgr->desc.usage));
   if (!pb_check_usage(desc->usage, mgr->desc.usage))
      return NULL;

   mtx_lock(&mgr->mutex);

   /* Create a new slab, if we run out of partial slabs */
   if (mgr->slabs.next == &mgr->slabs) {
      (void) pb_slab_create(mgr);
      if (mgr->slabs.next == &mgr->slabs) {
         mtx_unlock(&mgr->mutex);
         return NULL;
      }
   }

   /* Allocate the buffer from a partial (or just created) slab */
   list = mgr->slabs.next;
   slab = LIST_ENTRY(struct pb_slab, list, head);

   /* If totally full remove from the partial slab list */
   if (--slab->numFree == 0)
      list_delinit(list);

   list = slab->freeBuffers.next;
   list_delinit(list);

   mtx_unlock(&mgr->mutex);
   buf = LIST_ENTRY(struct pb_slab_buffer, list, head);

   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment_log2 = util_logbase2(desc->alignment);
   buf->base.usage = desc->usage;

   return &buf->base;
}


static void
pb_slab_manager_flush(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   assert(mgr->provider->flush);
   if (mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_slab_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   /* TODO: cleanup all allocated buffers */
   FREE(mgr);
}


struct pb_manager *
pb_slab_manager_create(struct pb_manager *provider,
                       pb_size bufSize,
                       pb_size slabSize,
                       const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr;

   mgr = CALLOC_STRUCT(pb_slab_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_slab_manager_destroy;
   mgr->base.create_buffer = pb_slab_manager_create_buffer;
   mgr->base.flush = pb_slab_manager_flush;

   mgr->provider = provider;
   mgr->bufSize = bufSize;
   mgr->slabSize = slabSize;
   mgr->desc = *desc;

   list_inithead(&mgr->slabs);

   (void) mtx_init(&mgr->mutex, mtx_plain);

   return &mgr->base;
}
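/*
 * Minimal usage sketch (illustrative only; `provider' stands for a
 * hypothetical upstream pb_manager, and the sizes are made up):
 *
 * @code
 * struct pb_desc desc;
 * desc.alignment = 64;
 * desc.usage = 0;
 *
 * struct pb_manager *slabs =
 *    pb_slab_manager_create(provider, 4 * 1024, 64 * 1024, &desc);
 * struct pb_buffer *buf = slabs->create_buffer(slabs, 4 * 1024, &desc);
 * ...
 * pb_reference(&buf, NULL);  // drops the last reference, returning
 *                            // the buffer to its slab's free list
 * slabs->destroy(slabs);
 * @endcode
 */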
static struct pb_buffer *
pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
                                    pb_size size,
                                    const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   pb_size bufSize;
   pb_size reqSize = size;
   unsigned i;

   if (desc->alignment > reqSize)
      reqSize = desc->alignment;

   bufSize = mgr->minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      if (bufSize >= reqSize)
         return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
      bufSize *= 2;
   }

   /* Fall back to allocate a buffer object directly from the provider. */
   return mgr->provider->create_buffer(mgr->provider, size, desc);
}


static void
pb_slab_range_manager_flush(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);

   /* Individual slabs don't hold any temporary buffers so no need to call them */

   assert(mgr->provider->flush);
   if (mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_slab_range_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   unsigned i;

   for (i = 0; i < mgr->numBuckets; ++i)
      mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
   FREE(mgr->bucketSizes);
   FREE(mgr);
}


struct pb_manager *
pb_slab_range_manager_create(struct pb_manager *provider,
                             pb_size minBufSize,
                             pb_size maxBufSize,
                             pb_size slabSize,
                             const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr;
   pb_size bufSize;
   unsigned i;

   if (!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_slab_range_manager);
   if (!mgr)
      goto out_err0;

   mgr->base.destroy = pb_slab_range_manager_destroy;
   mgr->base.create_buffer = pb_slab_range_manager_create_buffer;
   mgr->base.flush = pb_slab_range_manager_flush;

   mgr->provider = provider;
   mgr->minBufSize = minBufSize;
   mgr->maxBufSize = maxBufSize;

   mgr->numBuckets = 1;
   bufSize = minBufSize;
   while (bufSize < maxBufSize) {
      bufSize *= 2;
      ++mgr->numBuckets;
   }

   mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets));
   if (!mgr->buckets)
      goto out_err1;

   bufSize = minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc);
      if (!mgr->buckets[i])
         goto out_err2;
      bufSize *= 2;
   }

   return &mgr->base;

out_err2:
   for (i = 0; i < mgr->numBuckets; ++i)
      if (mgr->buckets[i])
         mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
out_err1:
   FREE(mgr);
out_err0:
   return NULL;
}
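/*
 * Bucket sizing example for the range manager above (illustrative
 * values): minBufSize = 4096 and maxBufSize = 65536 yield
 * numBuckets = 5 slab managers, for buffer sizes of 4, 8, 16, 32 and
 * 64 KiB. A request for 10000 bytes is rounded up into the 16 KiB
 * bucket, while anything larger than 64 KiB bypasses the slabs and is
 * allocated straight from the provider.
 */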