/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002-2019 Jeffrey Roberson <[email protected]>
 * Copyright (c) 2004, 2005 Bosko Milekic <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/counter.h>
#include <sys/_bitset.h>
#include <sys/_domainset.h>
#include <sys/_task.h>

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * The brief summary:  Zones describe unique allocation types.  Zones are
 * organized into per-CPU caches which are filled by buckets.  Buckets are
 * organized according to memory domains.  Buckets are filled from kegs which
 * are also organized according to memory domains.  Kegs describe a unique
 * allocation type, backend memory provider, and layout.  Kegs are associated
 * with one or more zones and zones reference one or more kegs.  Kegs provide
 * slabs which are virtually contiguous collections of pages.  Each slab is
 * broken down into one or more items that will satisfy an individual
 * allocation.
 *
 * Allocation is satisfied in the following order:
 * 1) Per-CPU cache
 * 2) Per-domain cache of buckets
 * 3) Slab from any of N kegs
 * 4) Backend page provider
 *
 * More detail on individual objects is contained below:
 *
 * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation.  They also contain
 * the user supplied value for size, which is adjusted for alignment purposes;
 * rsize is the result of that adjustment.  The Keg also stores information for
 * managing a hash of page addresses that maps pages to uma_slab_t structures
 * for pages that don't have embedded uma_slab_t's.
 *
 * Keg slab lists are organized by memory domain to support NUMA allocation
 * policies.  By default allocations are spread across domains to reduce the
 * potential for hotspots.  Special keg creation flags may be specified to
 * prefer local allocation.  However, there is no strict enforcement, as frees
 * may happen on any CPU and these are returned to the CPU-local cache
 * regardless of the originating domain.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a bitmask.  For item sizes that would yield more than
 * 10% memory waste we potentially allocate a separate uma_slab_t if this will
 * improve the number of items per slab that will fit.
 *
 * The only really gross cases, with regard to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power of two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations at this range
 * so at this time it may not make sense to optimize for it.  This can, of
 * course, be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones but by far most of the time they only serve
 * one.  When a Zone is created, a Keg is allocated and set up for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by critical sections, and may be accessed
 * safely only from their associated CPU, while the Zones backed by the same
 * Keg all share a common Keg lock (to coalesce contention on the backing
 * slabs).  The backing Keg typically only serves one Zone but in the case of
 * multiple Zones, one of the Zones is considered the Primary Zone and all
 * Zone-related stats from the Keg are done in the Primary Zone.  For an
 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
 */
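/*
 * Illustrative sketch only (not compiled here): the layering above is what a
 * consumer of the public interface in uma.h indirectly exercises.  The names
 * foo_zone and struct foo below are hypothetical examples, not part of UMA.
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	struct foo *f = uma_zalloc(foo_zone, M_WAITOK);
 *	...
 *	uma_zfree(foo_zone, f);
 *
 * Such an allocation is satisfied from the per-CPU cache when possible, then
 * from the per-domain bucket cache, and only then from a keg slab or the
 * backend page provider.
 */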
/*
 *	This is the representation of a normal (non-OFFPAGE) slab:
 *
 *	i == item
 *	s == slab pointer
 *
 *	<----------------  Page (UMA_SLAB_SIZE) ------------------>
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 *     |___________________________________________________________|
 *
 *
 *	This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |
 *     |___________________________________________________________|
 *     ___________    ^
 *     |slab header|  |
 *     |___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

/* Max waste percentage before going to off page slab management */
#define UMA_MAX_WASTE	10
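/*
 * Worked example of the UMA_MAX_WASTE rule (illustrative arithmetic only,
 * assuming PAGE_SIZE == 4096 and a hypothetical ~32 byte embedded slab
 * header):
 *
 *	item size 376:  (4096 - 32) / 376 = 10 items, 304 bytes left over
 *	10% threshold:  4096 / UMA_MAX_WASTE = 409 bytes
 *
 * 304 < 409, so the slab header stays embedded in the page.  If the leftover
 * exceeded the threshold, an off-page uma_slab_t would be considered, but
 * only when it actually improves the item count per slab.
 */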
/* Max size of a CACHESPREAD slab. */
#define UMA_CACHESPREAD_MAX_SIZE	(128 * 1024)

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define UMA_ZFLAG_OFFPAGE	0x00200000	/*
						 * Force the slab structure
						 * allocation off of the real
						 * memory.
						 */
#define UMA_ZFLAG_HASH		0x00400000	/*
						 * Use a hash table instead of
						 * caching information in the
						 * vm_page.
						 */
#define UMA_ZFLAG_VTOSLAB	0x00800000	/*
						 * Zone uses vtoslab for
						 * lookup.
						 */
#define UMA_ZFLAG_CTORDTOR	0x01000000	/* Zone has ctor/dtor set. */
#define UMA_ZFLAG_LIMIT		0x02000000	/* Zone has limit set. */
#define UMA_ZFLAG_CACHE		0x04000000	/* uma_zcache_create()d it */
#define UMA_ZFLAG_BUCKET	0x10000000	/* Bucket zone. */
#define UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
#define UMA_ZFLAG_TRASH		0x40000000	/* Add trash ctor/dtor. */

#define UMA_ZFLAG_INHERIT						\
    (UMA_ZFLAG_OFFPAGE | UMA_ZFLAG_HASH | UMA_ZFLAG_VTOSLAB |		\
     UMA_ZFLAG_BUCKET | UMA_ZFLAG_INTERNAL)

#define	PRINT_UMA_ZFLAGS	"\20"	\
    "\37TRASH"				\
    "\36INTERNAL"			\
    "\35BUCKET"				\
    "\33CACHE"				\
    "\32LIMIT"				\
    "\31CTORDTOR"			\
    "\30VTOSLAB"			\
    "\27HASH"				\
    "\26OFFPAGE"			\
    "\23SMR"				\
    "\22ROUNDROBIN"			\
    "\21FIRSTTOUCH"			\
    "\20PCPU"				\
    "\17NODUMP"				\
    "\16CACHESPREAD"			\
    "\14MAXBUCKET"			\
    "\13NOBUCKET"			\
    "\12SECONDARY"			\
    "\11NOTPAGE"			\
    "\10VM"				\
    "\7MTXCLASS"			\
    "\6NOFREE"				\
    "\5MALLOC"				\
    "\4NOTOUCH"				\
    "\3CONTIG"				\
    "\2ZINIT"

/*
 * Hash table for freed address -> slab translation.
 *
 * Only zones with memory not touchable by the allocator use the
 * hash table.  Otherwise slabs are found with vtoslab().
 */
#define UMA_HASH_SIZE_INIT	32

#define UMA_HASH(h, s)	((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)					\
	LIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),		\
	    (mem))], slab_tohashslab(s), uhs_hlink)

#define UMA_HASH_REMOVE(h, s)						\
	LIST_REMOVE(slab_tohashslab(s), uhs_hlink)

LIST_HEAD(slabhashhead, uma_hash_slab);

struct uma_hash {
	struct slabhashhead	*uh_slab_hash;	/* Hash table for slabs */
	u_int		uh_hashsize;	/* Current size of the hash table */
	u_int		uh_hashmask;	/* Mask used during hashing */
};

/*
 * Align field or structure to cache 'sector' in intel terminology.  This
 * is more efficient with adjacent line prefetch.
 */
#if defined(__amd64__) || defined(__powerpc64__)
#define UMA_SUPER_ALIGN	(CACHE_LINE_SIZE * 2)
#else
#define UMA_SUPER_ALIGN	CACHE_LINE_SIZE
#endif

#define UMA_ALIGN	__aligned(UMA_SUPER_ALIGN)

/*
 * The uma_bucket structure is used to queue and manage buckets divorced
 * from per-cpu caches.  They are loaded into uma_cache_bucket structures
 * for use.
 */
struct uma_bucket {
	STAILQ_ENTRY(uma_bucket)	ub_link; /* Link into the zone */
	int16_t		ub_cnt;			/* Count of items in bucket. */
	int16_t		ub_entries;		/* Max items. */
	smr_seq_t	ub_seq;			/* SMR sequence number. */
	void		*ub_bucket[];		/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

/*
 * The uma_cache_bucket structure is statically allocated on each per-cpu
 * cache.  Its use reduces branches and cache misses in the fast path.
 */
struct uma_cache_bucket {
	uma_bucket_t	ucb_bucket;
	int16_t		ucb_cnt;
	int16_t		ucb_entries;
	uint32_t	ucb_spare;
};

typedef struct uma_cache_bucket * uma_cache_bucket_t;
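/*
 * Fast-path sketch (illustrative only, not the actual allocator code): with
 * preemption disabled by a critical section, an allocation pops the next item
 * directly from the per-CPU cache bucket (see struct uma_cache below), which
 * is why ucb_cnt and ucb_entries mirror the underlying uma_bucket fields.
 *
 *	critical_enter();
 *	bucket = &cache->uc_allocbucket;
 *	if (__predict_true(bucket->ucb_cnt != 0)) {
 *		bucket->ucb_cnt--;
 *		item = bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt];
 *		cache->uc_allocs++;
 *	}
 *	critical_exit();
 */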
/*
 * The uma_cache structure is allocated for each cpu for every zone
 * type.  This optimizes synchronization out of the allocator fast path.
 */
struct uma_cache {
	struct uma_cache_bucket	uc_freebucket;	/* Bucket we're freeing to */
	struct uma_cache_bucket	uc_allocbucket;	/* Bucket to allocate from */
	struct uma_cache_bucket	uc_crossbucket;	/* cross domain bucket */
	uint64_t		uc_allocs;	/* Count of allocations */
	uint64_t		uc_frees;	/* Count of frees */
} UMA_ALIGN;

typedef struct uma_cache * uma_cache_t;

LIST_HEAD(slabhead, uma_slab);

/*
 * The cache structure pads perfectly into 64 bytes so we use spare
 * bits from the embedded cache buckets to store information from the zone
 * and keep all fast-path allocations accessing a single per-cpu line.
 */
static inline void
cache_set_uz_flags(uma_cache_t cache, uint32_t flags)
{

	cache->uc_freebucket.ucb_spare = flags;
}

static inline void
cache_set_uz_size(uma_cache_t cache, uint32_t size)
{

	cache->uc_allocbucket.ucb_spare = size;
}

static inline uint32_t
cache_uz_flags(uma_cache_t cache)
{

	return (cache->uc_freebucket.ucb_spare);
}

static inline uint32_t
cache_uz_size(uma_cache_t cache)
{

	return (cache->uc_allocbucket.ucb_spare);
}
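/*
 * Usage sketch (illustrative only): because the zone's size and flags are
 * mirrored into the spare bucket words above, the fast path can make its
 * decisions from the per-CPU cache line alone, without touching the zone:
 *
 *	cache = &zone->uz_cpu[curcpu];
 *	size = cache_uz_size(cache);
 *	if (__predict_false(cache_uz_flags(cache) & UMA_ZFLAG_CTORDTOR))
 *		... take the slower ctor/dtor path ...
 */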
/*
 * Per-domain slab lists.  Embedded in the kegs.
 */
struct uma_domain {
	struct mtx_padalign ud_lock;	/* Lock for the domain lists. */
	struct slabhead	ud_part_slab;	/* partially allocated slabs */
	struct slabhead	ud_free_slab;	/* completely unallocated slabs */
	struct slabhead	ud_full_slab;	/* fully allocated slabs */
	uint32_t	ud_pages;	/* Total page count */
	uint32_t	ud_free_items;	/* Count of items free in all slabs */
	uint32_t	ud_free_slabs;	/* Count of free slabs */
} __aligned(CACHE_LINE_SIZE);

typedef struct uma_domain * uma_domain_t;

/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
	struct uma_hash	uk_hash;
	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */

	struct domainset_ref uk_dr;	/* Domain selection policy. */
	uint32_t	uk_align;	/* Alignment mask */
	uint32_t	uk_reserve;	/* Number of reserved items. */
	uint32_t	uk_size;	/* Requested size of each item */
	uint32_t	uk_rsize;	/* Real size of each item */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	u_long		uk_offset;	/* Next free offset from base KVA */
	vm_offset_t	uk_kva;		/* Zone base KVA */

	uint32_t	uk_pgoff;	/* Offset to uma_slab struct */
	uint16_t	uk_ppera;	/* pages per allocation from backend */
	uint16_t	uk_ipers;	/* Items per slab */
	uint32_t	uk_flags;	/* Internal flags */

	/* Least used fields go to the last cache line. */
	const char	*uk_name;		/* Name of creating zone. */
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */

	/* Must be last, variable sized. */
	struct uma_domain	uk_domain[];	/* Keg's slab lists. */
};
typedef struct uma_keg	* uma_keg_t;

/*
 * Free bits per-slab.
 */
#define	SLAB_MAX_SETSIZE	(PAGE_SIZE / UMA_SMALLEST_UNIT)
#define	SLAB_MIN_SETSIZE	_BITSET_BITS
BITSET_DEFINE(noslabbits, 0);

/*
 * The slab structure manages a single contiguous allocation from backing
 * store and subdivides it into individually allocatable items.
 */
struct uma_slab {
	LIST_ENTRY(uma_slab)	us_link;	/* slabs in zone */
	uint16_t	us_freecount;		/* How many are free? */
	uint8_t		us_flags;		/* Page flags see uma.h */
	uint8_t		us_domain;		/* Backing NUMA domain. */
	struct noslabbits us_free;		/* Free bitmask, flexible. */
};
_Static_assert(sizeof(struct uma_slab) == __offsetof(struct uma_slab, us_free),
    "us_free field must be last");
_Static_assert(MAXMEMDOM < 255,
    "us_domain field is not wide enough");

typedef struct uma_slab * uma_slab_t;

/*
 * Slab structure with a full sized bitset and hash link for both
 * HASH and OFFPAGE zones.
 */
struct uma_hash_slab {
	LIST_ENTRY(uma_hash_slab) uhs_hlink;	/* Link for hash table */
	uint8_t			*uhs_data;	/* First item */
	struct uma_slab		uhs_slab;	/* Must be last. */
};

typedef struct uma_hash_slab * uma_hash_slab_t;

static inline uma_hash_slab_t
slab_tohashslab(uma_slab_t slab)
{

	return (__containerof(slab, struct uma_hash_slab, uhs_slab));
}

static inline void *
slab_data(uma_slab_t slab, uma_keg_t keg)
{

	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) == 0)
		return ((void *)((uintptr_t)slab - keg->uk_pgoff));
	else
		return (slab_tohashslab(slab)->uhs_data);
}

static inline void *
slab_item(uma_slab_t slab, uma_keg_t keg, int index)
{
	uintptr_t data;

	data = (uintptr_t)slab_data(slab, keg);
	return ((void *)(data + keg->uk_rsize * index));
}

static inline int
slab_item_index(uma_slab_t slab, uma_keg_t keg, void *item)
{
	uintptr_t data;

	data = (uintptr_t)slab_data(slab, keg);
	return (((uintptr_t)item - data) / keg->uk_rsize);
}
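/*
 * Illustrative sketch (not compiled here): freeing an item back to its slab
 * reduces to recovering the bit index with the helpers above and setting it
 * in the per-slab free bitmask, roughly:
 *
 *	freei = slab_item_index(slab, keg, item);
 *	BIT_SET(keg->uk_ipers, freei, &slab->us_free);
 *	slab->us_freecount++;
 *
 * slab_item(slab, keg, freei) would return the original item pointer.
 */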
STAILQ_HEAD(uma_bucketlist, uma_bucket);

struct uma_zone_domain {
	struct uma_bucketlist uzd_buckets; /* full buckets */
	uma_bucket_t	uzd_cross;	/* Fills from cross buckets. */
	long		uzd_nitems;	/* total item count */
	long		uzd_imax;	/* maximum item count this period */
	long		uzd_imin;	/* minimum item count this period */
	long		uzd_bimin;	/* Minimum item count this batch. */
	long		uzd_wss;	/* working set size estimate */
	long		uzd_limin;	/* Longtime minimum item count. */
	u_int		uzd_timin;	/* Time since uzd_limin == 0. */
	smr_seq_t	uzd_seq;	/* Lowest queued seq. */
	struct mtx	uzd_lock;	/* Lock for the domain */
} __aligned(CACHE_LINE_SIZE);

typedef struct uma_zone_domain * uma_zone_domain_t;

/*
 * Zone structure - per memory type.
 */
struct uma_zone {
	/* Offset 0, used in alloc/free fast/medium fast path and const. */
	uint32_t	uz_flags;	/* Flags inherited from kegs */
	uint32_t	uz_size;	/* Size inherited from kegs */
	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	smr_t		uz_smr;		/* Safe memory reclaim context. */
	uint64_t	uz_max_items;	/* Maximum number of items to alloc */
	uint64_t	uz_bucket_max;	/* Maximum bucket cache size */
	uint16_t	uz_bucket_size;	/* Number of items in full bucket */
	uint16_t	uz_bucket_size_max; /* Maximum number of bucket items */
	uint32_t	uz_sleepers;	/* Threads sleeping on limit */
	counter_u64_t	uz_xdomain;	/* Total number of cross-domain frees */

	/* Offset 64, used in bucket replenish. */
	uma_keg_t	uz_keg;		/* This zone's keg if !CACHE */
	uma_import	uz_import;	/* Import new memory to cache. */
	uma_release	uz_release;	/* Release memory from cache. */
	void		*uz_arg;	/* Import/release argument. */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Finalizer for each item. */
	volatile uint64_t uz_items;	/* Total items count & sleepers */
	uint64_t	uz_sleeps;	/* Total number of alloc sleeps */

	/* Offset 128, rare stats, misc read-only. */
	LIST_ENTRY(uma_zone) uz_link;	/* List of all zones in keg */
	counter_u64_t	uz_allocs;	/* Total number of allocations */
	counter_u64_t	uz_frees;	/* Total number of frees */
	counter_u64_t	uz_fails;	/* Total number of alloc failures */
	const char	*uz_name;	/* Text name of the zone */
	char		*uz_ctlname;	/* sysctl safe name string. */
	int		uz_namecnt;	/* duplicate name count. */
	uint16_t	uz_bucket_size_min; /* Min number of items in bucket */
	uint16_t	uz_reclaimers;	/* pending reclaim operations. */

	/* Offset 192, rare read-only. */
	struct sysctl_oid *uz_oid;	/* sysctl oid pointer. */
	const char	*uz_warning;	/* Warning to print on failure */
	struct timeval	uz_ratecheck;	/* Warnings rate-limiting */
	struct task	uz_maxaction;	/* Task to run when at limit */

	/* Offset 256. */
	struct mtx	uz_cross_lock;	/* Cross domain free lock */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[]; /* Per cpu caches */

	/* domains follow here. */
};

/*
 * Macros for interpreting the uz_items field.  20 bits of sleeper count
 * and 44 bits of item count.
 */
#define	UZ_ITEMS_SLEEPER_SHIFT	44LL
#define	UZ_ITEMS_SLEEPERS_MAX	((1 << (64 - UZ_ITEMS_SLEEPER_SHIFT)) - 1)
#define	UZ_ITEMS_COUNT_MASK	((1LL << UZ_ITEMS_SLEEPER_SHIFT) - 1)
#define	UZ_ITEMS_COUNT(x)	((x) & UZ_ITEMS_COUNT_MASK)
#define	UZ_ITEMS_SLEEPERS(x)	((x) >> UZ_ITEMS_SLEEPER_SHIFT)
#define	UZ_ITEMS_SLEEPER	(1LL << UZ_ITEMS_SLEEPER_SHIFT)
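/*
 * Decoding sketch (illustrative only): a single 64-bit read of uz_items
 * yields both quantities,
 *
 *	old = zone->uz_items;
 *	count = UZ_ITEMS_COUNT(old);		(low 44 bits)
 *	sleepers = UZ_ITEMS_SLEEPERS(old);	(high 20 bits)
 *
 * and a would-be sleeper registers itself by atomically adding
 * UZ_ITEMS_SLEEPER to the same word.
 */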
#define	ZONE_ASSERT_COLD(z)						\
	KASSERT(uma_zone_get_allocs((z)) == 0,				\
	    ("zone %s initialization after use.", (z)->uz_name))

/* Domains are contiguous after the last CPU */
#define ZDOM_GET(z, n)							\
    (&((uma_zone_domain_t)&(z)->uz_cpu[mp_maxid + 1])[n])

#undef UMA_ALIGN

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);

/* Lock Macros */

#define	KEG_LOCKPTR(k, d)	(struct mtx *)&(k)->uk_domain[(d)].ud_lock
#define	KEG_LOCK_INIT(k, d, lc)						\
	do {								\
		if ((lc))						\
			mtx_init(KEG_LOCKPTR(k, d), (k)->uk_name,	\
			    (k)->uk_name, MTX_DEF | MTX_DUPOK);		\
		else							\
			mtx_init(KEG_LOCKPTR(k, d), (k)->uk_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);		\
	} while (0)

#define	KEG_LOCK_FINI(k, d)	mtx_destroy(KEG_LOCKPTR(k, d))
#define	KEG_LOCK(k, d)							\
	({ mtx_lock(KEG_LOCKPTR(k, d)); KEG_LOCKPTR(k, d); })
#define	KEG_UNLOCK(k, d)	mtx_unlock(KEG_LOCKPTR(k, d))
#define	KEG_LOCK_ASSERT(k, d)	mtx_assert(KEG_LOCKPTR(k, d), MA_OWNED)

#define	KEG_GET(zone, keg) do {						\
	(keg) = (zone)->uz_keg;						\
	KASSERT((void *)(keg) != NULL,					\
	    ("%s: Invalid zone %p type", __func__, (zone)));		\
	} while (0)

#define	KEG_ASSERT_COLD(k)						\
	KASSERT(uma_keg_get_allocs((k)) == 0,				\
	    ("keg %s initialization after use.", (k)->uk_name))

#define	ZDOM_LOCK_INIT(z, zdom, lc)					\
	do {								\
		if ((lc))						\
			mtx_init(&(zdom)->uzd_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);		\
		else							\
			mtx_init(&(zdom)->uzd_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);		\
	} while (0)
#define	ZDOM_LOCK_FINI(z)	mtx_destroy(&(z)->uzd_lock)
#define	ZDOM_LOCK_ASSERT(z)	mtx_assert(&(z)->uzd_lock, MA_OWNED)

#define	ZDOM_LOCK(z)	mtx_lock(&(z)->uzd_lock)
#define	ZDOM_OWNED(z)	(mtx_owner(&(z)->uzd_lock) != NULL)
#define	ZDOM_UNLOCK(z)	mtx_unlock(&(z)->uzd_lock)

#define	ZONE_LOCK(z)	ZDOM_LOCK(ZDOM_GET((z), 0))
#define	ZONE_UNLOCK(z)	ZDOM_UNLOCK(ZDOM_GET((z), 0))
#define	ZONE_LOCKPTR(z)	(&ZDOM_GET((z), 0)->uzd_lock)

#define	ZONE_CROSS_LOCK_INIT(z)						\
	mtx_init(&(z)->uz_cross_lock, "UMA Cross", NULL, MTX_DEF)
#define	ZONE_CROSS_LOCK(z)	mtx_lock(&(z)->uz_cross_lock)
#define	ZONE_CROSS_UNLOCK(z)	mtx_unlock(&(z)->uz_cross_lock)
#define	ZONE_CROSS_LOCK_FINI(z)	mtx_destroy(&(z)->uz_cross_lock)

/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to lookup
 * the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, uint8_t *data)
{
	uma_hash_slab_t slab;
	u_int hval;

	hval = UMA_HASH(hash, data);

	LIST_FOREACH(slab, &hash->uh_slab_hash[hval], uhs_hlink) {
		if ((uint8_t *)slab->uhs_data == data)
			return (&slab->uhs_slab);
	}
	return (NULL);
}

static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	return (p->plinks.uma.slab);
}

static __inline void
vtozoneslab(vm_offset_t va, uma_zone_t *zone, uma_slab_t *slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	*slab = p->plinks.uma.slab;
	*zone = p->plinks.uma.zone;
}

static __inline void
vsetzoneslab(vm_offset_t va, uma_zone_t zone, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->plinks.uma.slab = slab;
	p->plinks.uma.zone = zone;
}

extern unsigned long uma_kmem_limit;
extern unsigned long uma_kmem_total;

/* Adjust bytes under management by UMA. */
static inline void
uma_total_dec(unsigned long size)
{

	atomic_subtract_long(&uma_kmem_total, size);
}

static inline void
uma_total_inc(unsigned long size)
{

	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
		uma_reclaim_wakeup();
}

/*
 * The following two functions may be defined by architecture specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
void	*uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
	    uint8_t *pflag, int wait);
void	uma_small_free(void *mem, vm_size_t size, uint8_t flags);

/* Set a global soft limit on UMA managed memory. */
void	uma_set_limit(unsigned long limit);

#endif /* _KERNEL */

#endif /* VM_UMA_INT_H */