Path: blob/21.2-virgl/src/gallium/auxiliary/pipebuffer/pb_slab.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "pb_slab.h"

#include "util/u_math.h"
#include "util/u_memory.h"

/* All slab allocations from the same heap and with the same size belong
 * to the same group.
 */
struct pb_slab_group
{
   /* Slabs with allocation candidates. Typically, slabs in this list should
    * have some free entries.
    *
    * However, when the head becomes full we purposefully keep it around
    * until the next allocation attempt, at which time we try a reclaim.
    * The intention is to keep serving allocations from the same slab as long
    * as possible for better locality.
    *
    * Due to a race in new slab allocation, additional slabs in this list
    * can be fully allocated as well.
    */
   struct list_head slabs;
};


static void
pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
{
   struct pb_slab *slab = entry->slab;

   list_del(&entry->head); /* remove from reclaim list */
   list_add(&entry->head, &slab->free);
   slab->num_free++;

   /* Add slab to the group's list if it isn't already linked. */
   if (!list_is_linked(&slab->head)) {
      struct pb_slab_group *group = &slabs->groups[entry->group_index];
      list_addtail(&slab->head, &group->slabs);
   }

   if (slab->num_free >= slab->num_entries) {
      list_del(&slab->head);
      slabs->slab_free(slabs->priv, slab);
   }
}

static void
pb_slabs_reclaim_locked(struct pb_slabs *slabs)
{
   while (!list_is_empty(&slabs->reclaim)) {
      struct pb_slab_entry *entry =
         LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);

      if (!slabs->can_reclaim(slabs->priv, entry))
         break;

      pb_slab_reclaim(slabs, entry);
   }
}
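
/* Illustrative example of the group indexing used by pb_slab_alloc() below.
 * The parameters here are made up for the example: min_order = 8,
 * max_order = 12 (so num_orders = 5), two heaps, and
 * allow_three_fourths_allocations = true.
 *
 * A 1500-byte allocation from heap 1 gives
 * order = util_logbase2_ceil(1500) = 11, i.e. a power-of-two entry size of
 * 2048. Since 1500 <= 2048 * 3 / 4 = 1536, the 3/4 bucket is used instead
 * (entry_size = 1536, three_fourths = true), so
 *
 *    group_index = (1 * 5 + (11 - 8)) * 2 + 1 = 17
 */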

/* Allocate a slab entry of the given size from the given heap.
 *
 * This will try to re-use entries that have previously been freed. However,
 * if no entries are free (or all free entries are still "in flight" as
 * determined by the can_reclaim callback function), a new slab will be
 * requested via the slab_alloc callback.
 *
 * Note that slab_free can also be called by this function.
 */
struct pb_slab_entry *
pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
{
   unsigned order = MAX2(slabs->min_order, util_logbase2_ceil(size));
   unsigned group_index;
   struct pb_slab_group *group;
   struct pb_slab *slab;
   struct pb_slab_entry *entry;
   unsigned entry_size = 1 << order;
   bool three_fourths = false;

   /* If the size is <= 3/4 of the entry size, use a slab with entries using
    * 3/4 sizes to reduce overallocation.
    */
   if (slabs->allow_three_fourths_allocations && size <= entry_size * 3 / 4) {
      entry_size = entry_size * 3 / 4;
      three_fourths = true;
   }

   assert(order < slabs->min_order + slabs->num_orders);
   assert(heap < slabs->num_heaps);

   group_index = (heap * slabs->num_orders + (order - slabs->min_order)) *
                 (1 + slabs->allow_three_fourths_allocations) + three_fourths;
   group = &slabs->groups[group_index];

   mtx_lock(&slabs->mutex);

   /* If there is no candidate slab at all, or the first slab has no free
    * entries, try reclaiming entries.
    */
   if (list_is_empty(&group->slabs) ||
       list_is_empty(&LIST_ENTRY(struct pb_slab, group->slabs.next, head)->free))
      pb_slabs_reclaim_locked(slabs);

   /* Remove slabs without free entries. */
   while (!list_is_empty(&group->slabs)) {
      slab = LIST_ENTRY(struct pb_slab, group->slabs.next, head);
      if (!list_is_empty(&slab->free))
         break;

      list_del(&slab->head);
   }

   if (list_is_empty(&group->slabs)) {
      /* Drop the mutex temporarily to prevent a deadlock where the allocation
       * calls back into slab functions (most likely to happen for
       * pb_slab_reclaim if memory is low).
       *
       * There's a chance that racing threads will end up allocating multiple
       * slabs for the same group, but that doesn't hurt correctness.
       */
      mtx_unlock(&slabs->mutex);
      slab = slabs->slab_alloc(slabs->priv, heap, entry_size, group_index);
      if (!slab)
         return NULL;
      mtx_lock(&slabs->mutex);

      list_add(&slab->head, &group->slabs);
   }

   entry = LIST_ENTRY(struct pb_slab_entry, slab->free.next, head);
   list_del(&entry->head);
   slab->num_free--;

   mtx_unlock(&slabs->mutex);

   return entry;
}

/* Free the given slab entry.
 *
 * The entry may still be in use e.g. by in-flight command submissions. The
 * can_reclaim callback function will be called to determine whether the entry
 * can be handed out again by pb_slab_alloc.
 */
void
pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry)
{
   mtx_lock(&slabs->mutex);
   list_addtail(&entry->head, &slabs->reclaim);
   mtx_unlock(&slabs->mutex);
}
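
/* Sketch of what a driver's can_reclaim callback might look like. The
 * my_buffer type, its fields, and the my_fence_is_signalled() helper are
 * hypothetical; a real winsys plugs in its own fence or busy tracking here:
 *
 *    static bool
 *    my_can_reclaim(void *priv, struct pb_slab_entry *entry)
 *    {
 *       // The pb_slab_entry is assumed to be embedded in the driver's
 *       // buffer struct, so its offset recovers the containing buffer.
 *       struct my_buffer *buf = (struct my_buffer *)
 *          ((char *)entry - offsetof(struct my_buffer, slab_entry));
 *
 *       // Only report the entry as reclaimable once the GPU is done with it.
 *       return my_fence_is_signalled(buf->fence);
 *    }
 */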

/* Check if any of the entries handed to pb_slab_free are ready to be re-used.
 *
 * This may end up freeing some slabs and is therefore useful to try to reclaim
 * some no longer used memory. However, calling this function is not strictly
 * required since pb_slab_alloc will eventually do the same thing.
 */
void
pb_slabs_reclaim(struct pb_slabs *slabs)
{
   mtx_lock(&slabs->mutex);
   pb_slabs_reclaim_locked(slabs);
   mtx_unlock(&slabs->mutex);
}

/* Initialize the slabs manager.
 *
 * The minimum and maximum size of slab entries are 2^min_order and
 * 2^max_order, respectively.
 *
 * priv will be passed to the given callback functions.
 */
bool
pb_slabs_init(struct pb_slabs *slabs,
              unsigned min_order, unsigned max_order,
              unsigned num_heaps, bool allow_three_fourth_allocations,
              void *priv,
              slab_can_reclaim_fn *can_reclaim,
              slab_alloc_fn *slab_alloc,
              slab_free_fn *slab_free)
{
   unsigned num_groups;
   unsigned i;

   assert(min_order <= max_order);
   assert(max_order < sizeof(unsigned) * 8 - 1);

   slabs->min_order = min_order;
   slabs->num_orders = max_order - min_order + 1;
   slabs->num_heaps = num_heaps;
   slabs->allow_three_fourths_allocations = allow_three_fourth_allocations;

   slabs->priv = priv;
   slabs->can_reclaim = can_reclaim;
   slabs->slab_alloc = slab_alloc;
   slabs->slab_free = slab_free;

   list_inithead(&slabs->reclaim);

   num_groups = slabs->num_orders * slabs->num_heaps *
                (1 + allow_three_fourth_allocations);
   slabs->groups = CALLOC(num_groups, sizeof(*slabs->groups));
   if (!slabs->groups)
      return false;

   for (i = 0; i < num_groups; ++i) {
      struct pb_slab_group *group = &slabs->groups[i];
      list_inithead(&group->slabs);
   }

   (void) mtx_init(&slabs->mutex, mtx_plain);

   return true;
}

/* Shut down the slab manager.
 *
 * This will free all allocated slabs and internal structures, even if some
 * of the slab entries are still in flight (i.e. if can_reclaim would return
 * false).
 */
void
pb_slabs_deinit(struct pb_slabs *slabs)
{
   /* Reclaim all slab entries (even those that are still in flight). This
    * implicitly calls slab_free for everything.
    */
   while (!list_is_empty(&slabs->reclaim)) {
      struct pb_slab_entry *entry =
         LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
      pb_slab_reclaim(slabs, entry);
   }

   FREE(slabs->groups);
   mtx_destroy(&slabs->mutex);
}
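
/* A minimal sketch of how this allocator can be driven, assuming a toy
 * backend that carves slabs out of plain heap memory. All "my_*" names are
 * hypothetical and error handling is omitted; real users implement the
 * callbacks on top of their own buffer objects and fences, and my_can_reclaim
 * would be a reclaim check like the one sketched after pb_slab_free above:
 *
 *    struct my_slab {
 *       struct pb_slab base;
 *       struct pb_slab_entry *entries;
 *       void *storage;
 *    };
 *
 *    static struct pb_slab *
 *    my_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
 *                  unsigned group_index)
 *    {
 *       unsigned slab_size = 64 * 1024;          // arbitrary for the sketch
 *       unsigned count = slab_size / entry_size;
 *       struct my_slab *slab = CALLOC_STRUCT(my_slab);
 *
 *       slab->storage = MALLOC(slab_size);
 *       slab->entries = CALLOC(count, sizeof(*slab->entries));
 *       slab->base.num_entries = count;
 *       slab->base.num_free = count;
 *       list_inithead(&slab->base.free);
 *
 *       for (unsigned i = 0; i < count; i++) {
 *          slab->entries[i].slab = &slab->base;
 *          slab->entries[i].group_index = group_index;
 *          list_addtail(&slab->entries[i].head, &slab->base.free);
 *       }
 *       return &slab->base;
 *    }
 *
 *    static void
 *    my_slab_free(void *priv, struct pb_slab *pslab)
 *    {
 *       struct my_slab *slab = (struct my_slab *)pslab;
 *       FREE(slab->entries);
 *       FREE(slab->storage);
 *       FREE(slab);
 *    }
 *
 *    // Usage: one heap, entry sizes from 2^8 to 2^16 bytes.
 *    struct pb_slabs slabs;
 *    pb_slabs_init(&slabs, 8, 16, 1, false, NULL,
 *                  my_can_reclaim, my_slab_alloc, my_slab_free);
 *
 *    struct pb_slab_entry *entry = pb_slab_alloc(&slabs, 4096, 0);
 *    ...
 *    pb_slab_free(&slabs, entry);
 *    pb_slabs_reclaim(&slabs);
 *    pb_slabs_deinit(&slabs);
 */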