Path: blob/21.2-virgl/src/gallium/auxiliary/pipebuffer/pb_slab.h
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * \file
 *
 * Helper library for carving out smaller allocations (called "(slab) entries")
 * from larger buffers (called "slabs").
 *
 * The library supports maintaining separate heaps (e.g. VRAM vs. GTT). The
 * meaning of each heap is treated as opaque by this library.
 *
 * The library allows delaying the re-use of an entry, i.e. an entry may be
 * freed by calling \ref pb_slab_free even while the corresponding buffer
 * region is still in use by the GPU. A callback function is called to
 * determine when it is safe to allocate the entry again; the user of this
 * library is expected to maintain the required fences or similar.
 */

#ifndef PB_SLAB_H
#define PB_SLAB_H

#include "pb_buffer.h"
#include "util/list.h"
#include "os/os_thread.h"

struct pb_slab;
struct pb_slabs;
struct pb_slab_group;

/* Descriptor of a slab entry.
 *
 * The user of this utility library is expected to embed this in a larger
 * structure that describes a buffer object.
 */
struct pb_slab_entry
{
   struct list_head head;
   struct pb_slab *slab; /* the slab that contains this buffer */
   unsigned group_index; /* index into pb_slabs::groups */
   unsigned entry_size;
};

/* Descriptor of a slab from which many entries are carved out.
 *
 * The user of this utility library is expected to embed this in a larger
 * structure that describes a buffer object.
 */
struct pb_slab
{
   struct list_head head;

   struct list_head free; /* list of free pb_slab_entry structures */
   unsigned num_free; /* number of entries in free list */
   unsigned num_entries; /* total number of entries */
};

/* Callback function that is called when a new slab needs to be allocated
 * for fulfilling allocation requests of the given size from the given heap.
 *
 * The callback must allocate a pb_slab structure and the desired number
 * of entries. All entries that belong to the slab must be added to the free
 * list. Entries' pb_slab_entry structures must be initialized with the given
 * group_index.
 *
 * The callback may call pb_slab functions.
 */
typedef struct pb_slab *(slab_alloc_fn)(void *priv,
                                        unsigned heap,
                                        unsigned entry_size,
                                        unsigned group_index);
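/* Example: a minimal slab_alloc_fn sketch, illustrative only and not part
 * of this header. The "my_slab" wrapper, the fixed 64 KiB slab size, and
 * the omitted GPU buffer allocation and calloc failure handling are
 * assumptions; the sketch only shows the steps the contract above requires:
 * allocate the pb_slab, initialize every entry with the given group_index,
 * and put all entries on the free list.
 *
 *    struct my_slab {
 *       struct pb_slab base;
 *       struct pb_slab_entry *entries; // one descriptor per carved entry
 *    };
 *
 *    static struct pb_slab *
 *    my_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
 *                  unsigned group_index)
 *    {
 *       struct my_slab *slab = calloc(1, sizeof(*slab));
 *       unsigned num_entries = (64 * 1024) / entry_size;
 *       unsigned i;
 *
 *       slab->entries = calloc(num_entries, sizeof(*slab->entries));
 *       slab->base.num_entries = num_entries;
 *       slab->base.num_free = num_entries;
 *       list_inithead(&slab->base.free);
 *
 *       for (i = 0; i < num_entries; ++i) {
 *          slab->entries[i].slab = &slab->base;
 *          slab->entries[i].group_index = group_index;
 *          slab->entries[i].entry_size = entry_size;
 *          list_addtail(&slab->entries[i].head, &slab->base.free);
 *       }
 *
 *       // A real implementation would also allocate the backing GPU
 *       // buffer here and return NULL on allocation failure.
 *       return &slab->base;
 *    }
 */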
/* Callback function that is called when all entries of a slab have been freed.
 *
 * The callback must free the slab and all its entries. It must not call any of
 * the pb_slab functions, or a deadlock (recursive mutex lock) may occur.
 */
typedef void (slab_free_fn)(void *priv, struct pb_slab *);

/* Callback function to determine whether a given entry can already be reused.
 */
typedef bool (slab_can_reclaim_fn)(void *priv, struct pb_slab_entry *);

/* Manager of slab allocations. The user of this utility library should embed
 * this in a structure somewhere and call pb_slabs_init/deinit at init/shutdown
 * time.
 */
struct pb_slabs
{
   mtx_t mutex;

   unsigned min_order;
   unsigned num_orders;
   unsigned num_heaps;
   bool allow_three_fourth_allocations;

   /* One group per (heap, order, three_fourth_allocations). */
   struct pb_slab_group *groups;

   /* List of entries waiting to be reclaimed, i.e. they have been passed to
    * pb_slab_free, but may not be safe for re-use yet. The tail points at
    * the most-recently freed entry.
    */
   struct list_head reclaim;

   void *priv;
   slab_can_reclaim_fn *can_reclaim;
   slab_alloc_fn *slab_alloc;
   slab_free_fn *slab_free;
};

struct pb_slab_entry *
pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap);

void
pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry);

void
pb_slabs_reclaim(struct pb_slabs *slabs);

bool
pb_slabs_init(struct pb_slabs *slabs,
              unsigned min_order, unsigned max_order,
              unsigned num_heaps, bool allow_three_fourth_allocations,
              void *priv,
              slab_can_reclaim_fn *can_reclaim,
              slab_alloc_fn *slab_alloc,
              slab_free_fn *slab_free);

void
pb_slabs_deinit(struct pb_slabs *slabs);

#endif
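
/* Example: a minimal usage sketch, illustrative only and not part of this
 * header. It assumes the hypothetical callbacks my_can_reclaim,
 * my_slab_alloc and my_slab_free, a caller-owned private pointer "ctx",
 * a single heap, and entry sizes between 2^8 and 2^16 bytes:
 *
 *    struct pb_slabs slabs;
 *
 *    if (!pb_slabs_init(&slabs, 8, 16, 1, false, ctx,
 *                       my_can_reclaim, my_slab_alloc, my_slab_free))
 *       return false;
 *
 *    // 4096 bytes from heap 0; may trigger a my_slab_alloc call
 *    struct pb_slab_entry *entry = pb_slab_alloc(&slabs, 4096, 0);
 *    if (entry) {
 *       // ... use the buffer region described by the entry ...
 *
 *       // Safe even while the GPU still uses the region; the entry is
 *       // only handed out again once my_can_reclaim returns true.
 *       pb_slab_free(&slabs, entry);
 *    }
 *
 *    pb_slabs_reclaim(&slabs); // optionally process deferred frees eagerly
 *    pb_slabs_deinit(&slabs);
 */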