//! A very simple, uniformly-typed slab arena that supports deallocation and
//! reusing deallocated entries' space.
//!
//! > **⚠️ Warning ⚠️**: this crate is an internal-only crate for the Wasmtime
//! > project and is not intended for general use. APIs are not strictly
//! > reviewed for safety, and usage outside of Wasmtime may have bugs. If
//! > you're interested in using this, feel free to file an issue on the
//! > Wasmtime repository to start a discussion about doing so, but otherwise
//! > be aware that your usage of this crate is not supported.
//!
//! The free list of vacant entries in the slab is stored inline in the slab's
//! existing storage.
//!
//! # Example
//!
//! ```
//! use wasmtime_internal_slab::{Id, Slab};
//!
//! let mut slab = Slab::new();
//!
//! // Insert some values into the slab.
//! let rza = slab.alloc("Robert Fitzgerald Diggs");
//! let gza = slab.alloc("Gary Grice");
//! let bill = slab.alloc("Bill Gates");
//!
//! // Allocated elements can be accessed infallibly via indexing (indexing a
//! // missing or deallocated entry will panic).
//! assert_eq!(slab[rza], "Robert Fitzgerald Diggs");
//!
//! // Alternatively, the `get` and `get_mut` methods provide fallible lookup.
//! if let Some(genius) = slab.get(gza) {
//!     println!("The gza gza genius: {}", genius);
//! }
//! if let Some(val) = slab.get_mut(bill) {
//!     *val = "Bill Gates doesn't belong in this set...";
//! }
//!
//! // We can remove values from the slab.
//! slab.dealloc(bill);
//!
//! // Allocate a new entry.
//! let bill = slab.alloc("Bill Murray");
//! ```
//!
//! # Using `Id`s with the Wrong `Slab`
//!
//! `Slab` does NOT check that `Id`s used to access previously-allocated values
//! came from the current `Slab` instance (as opposed to a different `Slab`
//! instance). Using `Id`s from a different `Slab` is safe, but will yield an
//! unrelated value, if any at all.
//!
//! If you desire checking that an `Id` came from the correct `Slab` instance,
//! it should be easy to layer that functionality on top of this crate by
//! wrapping `Slab` and `Id` in types that additionally maintain a slab
//! instance identifier.
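//!
//! For example, one possible sketch (the `CheckedSlab` and `CheckedId` names
//! and the process-global counter are illustrative, not part of this crate):
//!
//! ```rust
//! use core::sync::atomic::{AtomicU32, Ordering};
//!
//! // A process-wide counter handing out a unique identifier per slab.
//! static NEXT_SLAB_ID: AtomicU32 = AtomicU32::new(0);
//!
//! pub struct CheckedId {
//!     id: wasmtime_internal_slab::Id,
//!     slab_id: u32,
//! }
//!
//! pub struct CheckedSlab<T> {
//!     slab: wasmtime_internal_slab::Slab<T>,
//!     slab_id: u32,
//! }
//!
//! impl<T> CheckedSlab<T> {
//!     pub fn new() -> Self {
//!         CheckedSlab {
//!             slab: wasmtime_internal_slab::Slab::new(),
//!             // Tag this slab instance with a fresh identifier.
//!             slab_id: NEXT_SLAB_ID.fetch_add(1, Ordering::Relaxed),
//!         }
//!     }
//!
//!     pub fn alloc(&mut self, value: T) -> CheckedId {
//!         CheckedId {
//!             id: self.slab.alloc(value),
//!             slab_id: self.slab_id,
//!         }
//!     }
//!
//!     pub fn get(&self, id: CheckedId) -> Option<&T> {
//!         // Reject ids minted by a different slab instance.
//!         assert_eq!(id.slab_id, self.slab_id, "id used with the wrong slab");
//!         self.slab.get(id.id)
//!     }
//! }
//! ```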
//!
//! # The ABA Problem
//!
//! This `Slab` type does NOT protect against ABA bugs, such as the following
//! sequence:
//!
//! * Value `A` is allocated into the slab, yielding id `i`.
//!
//! * `A` is deallocated, and so `i`'s associated entry is added to the slab's
//!   free list.
//!
//! * Value `B` is allocated into the slab, reusing `i`'s associated entry,
//!   yielding id `i`.
//!
//! * The "original" id `i` is used to access the arena, expecting the
//!   deallocated value `A`, but getting the new value `B`.
//!
//! That is, it does not detect or protect against the memory-safe version of
//! use-after-free bugs.
//!
//! If you need to protect against ABA bugs, it should be easy to layer that
//! functionality on top of this crate by wrapping `Slab` with something like
//! the following:
//!
//! ```rust
//! pub struct GenerationalId {
//!     id: wasmtime_internal_slab::Id,
//!     generation: u32,
//! }
//!
//! struct GenerationalEntry<T> {
//!     value: T,
//!     generation: u32,
//! }
//!
//! pub struct GenerationalSlab<T> {
//!     slab: wasmtime_internal_slab::Slab<GenerationalEntry<T>>,
//!     generation: u32,
//! }
//!
//! impl<T> GenerationalSlab<T> {
//!     pub fn alloc(&mut self, value: T) -> GenerationalId {
//!         let generation = self.generation;
//!         let id = self.slab.alloc(GenerationalEntry { value, generation });
//!         GenerationalId { id, generation }
//!     }
//!
//!     pub fn get(&self, id: GenerationalId) -> Option<&T> {
//!         let entry = self.slab.get(id.id)?;
//!
//!         // Check that the entry's generation matches the id's generation,
//!         // else we have an ABA bug. (Alternatively, return `None` instead
//!         // of panicking.)
//!         assert_eq!(id.generation, entry.generation);
//!
//!         Some(&entry.value)
//!     }
//!
//!     pub fn dealloc(&mut self, id: GenerationalId) {
//!         // Check that the entry's generation matches the id's generation,
//!         // else we have an ABA bug. (Alternatively, silently return on
//!         // double-free instead of panicking.)
//!         assert_eq!(id.generation, self.slab[id.id].generation);
//!
//!         self.slab.dealloc(id.id);
//!
//!         // Increment our generation whenever we deallocate so that any new
//!         // value placed in this same entry will have a different generation
//!         // and we can detect ABA bugs.
//!         self.generation += 1;
//!     }
//! }
//! ```

#![no_std]
#![forbid(unsafe_code)]
#![deny(missing_docs, missing_debug_implementations)]

extern crate alloc;

use alloc::vec::Vec;
use core::fmt;
use core::num::NonZeroU32;

/// An identifier for an allocated value inside a `Slab`.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct Id(EntryIndex);

impl fmt::Debug for Id {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("Id").field(&self.0.index()).finish()
    }
}

impl Id {
    /// Get the raw underlying representation of this `Id`.
    #[inline]
    pub fn into_raw(self) -> u32 {
        u32::try_from(self.0.index()).unwrap()
    }

    /// Construct an `Id` from its raw underlying representation.
    ///
    /// `raw` should be a value that was previously created via
    /// `Id::into_raw`. May panic if given arbitrary values.
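    ///
    /// # Example
    ///
    /// A small sketch of a round-trip through the raw representation:
    ///
    /// ```
    /// use wasmtime_internal_slab::{Id, Slab};
    ///
    /// let mut slab = Slab::new();
    /// let id = slab.alloc("hello");
    ///
    /// // Lower the id to its raw `u32` form, then recover it.
    /// let raw = id.into_raw();
    /// let same_id = Id::from_raw(raw);
    /// assert_eq!(id, same_id);
    /// assert_eq!(slab[same_id], "hello");
    /// ```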
    #[inline]
    pub fn from_raw(raw: u32) -> Self {
        let raw = usize::try_from(raw).unwrap();
        Self(EntryIndex::new(raw))
    }
}

/// A simple, uniformly-typed slab arena.
pub struct Slab<T> {
    /// The slab's entries; each is either occupied, holding a `T`, or vacant,
    /// serving as a link in the free list.
    entries: Vec<Entry<T>>,

    /// The index of the first free entry in the free list.
    free: Option<EntryIndex>,

    /// The number of occupied entries in this slab.
    len: u32,
}

impl<T> fmt::Debug for Slab<T>
where
    T: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_map().entries(self.iter()).finish()
    }
}

enum Entry<T> {
    /// An occupied entry holding a `T`.
    Occupied(T),

    /// A vacant entry.
    Free {
        /// A link in the slab's free list, pointing to the next free entry,
        /// if any.
        next_free: Option<EntryIndex>,
    },
}

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
struct EntryIndex(NonZeroU32);

impl EntryIndex {
    #[inline]
    fn new(index: usize) -> Self {
        assert!(index <= Slab::<()>::MAX_CAPACITY);
        let x = u32::try_from(index + 1).unwrap();
        Self(NonZeroU32::new(x).unwrap())
    }

    #[inline]
    fn index(&self) -> usize {
        let index = self.0.get() - 1;
        usize::try_from(index).unwrap()
    }
}

impl<T> Default for Slab<T> {
    #[inline]
    fn default() -> Self {
        Self {
            entries: Vec::default(),
            free: None,
            len: 0,
        }
    }
}

impl<T> core::ops::Index<Id> for Slab<T> {
    type Output = T;

    #[inline]
    fn index(&self, id: Id) -> &Self::Output {
        self.get(id)
            .expect("id from different slab or value was deallocated")
    }
}

impl<T> core::ops::IndexMut<Id> for Slab<T> {
    #[inline]
    fn index_mut(&mut self, id: Id) -> &mut Self::Output {
        self.get_mut(id)
            .expect("id from different slab or value was deallocated")
    }
}

impl<T> Slab<T> {
    /// The maximum capacity any `Slab` can have: `u32::MAX - 1`.
    pub const MAX_CAPACITY: usize = (u32::MAX - 1) as usize;

    /// Construct a new, empty slab.
    #[inline]
    pub fn new() -> Self {
        Slab::default()
    }

    /// Construct a new, empty slab, pre-reserving space for at least
    /// `capacity` elements.
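    ///
    /// # Example
    ///
    /// A small sketch of pre-reserving capacity:
    ///
    /// ```
    /// use wasmtime_internal_slab::Slab;
    ///
    /// let slab = Slab::<u32>::with_capacity(10);
    /// assert!(slab.capacity() >= 10);
    /// assert!(slab.is_empty());
    /// ```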
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        let mut slab = Self::new();
        slab.reserve(capacity);
        slab
    }

    /// Ensure that there is space for at least `additional` elements in this
    /// slab.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `Self::MAX_CAPACITY`.
    pub fn reserve(&mut self, additional: usize) {
        let cap = self.capacity();
        let len = self.len();
        assert!(cap >= len);
        if cap - len >= additional {
            // Already have `additional` capacity available.
            return;
        }

        self.entries.reserve(additional);

        // Maintain the invariant that `i <= MAX_CAPACITY` for all indices `i`
        // in `self.entries`.
        assert!(self.entries.capacity() <= Self::MAX_CAPACITY);
    }

    fn double_capacity(&mut self) {
        // Double our capacity to amortize the cost of resizing. But make sure
        // we add some amount of minimum additional capacity, since doubling
        // zero capacity isn't useful.
        const MIN_CAPACITY: usize = 16;
        let additional = core::cmp::max(self.entries.capacity(), MIN_CAPACITY);
        self.reserve(additional);
    }

    /// What is the capacity of this slab? That is, how many entries can it
    /// contain within its current underlying storage?
    #[inline]
    pub fn capacity(&self) -> usize {
        self.entries.capacity()
    }

    /// How many values are currently allocated within this slab?
    #[inline]
    pub fn len(&self) -> usize {
        usize::try_from(self.len).unwrap()
    }

    /// Are there zero allocated values within this slab?
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Try to allocate a `T` value within this slab.
    ///
    /// If there is no available capacity, ownership of the given value is
    /// returned via `Err(value)`.
    #[inline]
    pub fn try_alloc(&mut self, value: T) -> Result<Id, T> {
        if let Some(index) = self.try_alloc_index() {
            let next_free = match self.entries[index.index()] {
                Entry::Free { next_free } => next_free,
                Entry::Occupied { .. } => unreachable!(),
            };
            self.free = next_free;
            self.entries[index.index()] = Entry::Occupied(value);
            self.len += 1;
            Ok(Id(index))
        } else {
            Err(value)
        }
    }

    #[inline]
    fn try_alloc_index(&mut self) -> Option<EntryIndex> {
        self.free.take().or_else(|| {
            if self.entries.len() < self.entries.capacity() {
                let index = EntryIndex::new(self.entries.len());
                self.entries.push(Entry::Free { next_free: None });
                Some(index)
            } else {
                None
            }
        })
    }

    /// Allocate a `T` value within this slab, allocating additional underlying
    /// storage if there is no available capacity.
    ///
    /// # Panics
    ///
    /// Panics if allocating this value requires reallocating the underlying
    /// storage, and the new capacity exceeds `Slab::MAX_CAPACITY`.
    #[inline]
    pub fn alloc(&mut self, value: T) -> Id {
        self.try_alloc(value)
            .unwrap_or_else(|value| self.alloc_slow(value))
    }

    /// Get the `Id` that will be returned for the next allocation in this slab.
    #[inline]
    pub fn next_id(&self) -> Id {
        let index = self.free.unwrap_or_else(|| EntryIndex::new(self.len()));
        Id(index)
    }

    #[inline(never)]
    #[cold]
    fn alloc_slow(&mut self, value: T) -> Id {
        // Reserve additional capacity, since we didn't have space for the
        // allocation.
        self.double_capacity();
        // After which the allocation will succeed.
        self.try_alloc(value).ok().unwrap()
    }

    /// Get a shared borrow of the value associated with `id`.
    ///
    /// Returns `None` if the value has since been deallocated.
    ///
    /// If `id` comes from a different `Slab` instance, this method may panic,
    /// return `None`, or return an arbitrary value.
    #[inline]
    pub fn get(&self, id: Id) -> Option<&T> {
        match self
            .entries
            .get(id.0.index())
            .expect("id from different slab")
        {
            Entry::Occupied(x) => Some(x),
            Entry::Free { .. } => None,
        }
    }

    /// Get an exclusive borrow of the value associated with `id`.
    ///
    /// Returns `None` if the value has since been deallocated.
    ///
    /// If `id` comes from a different `Slab` instance, this method may panic,
    /// return `None`, or return an arbitrary value.
    #[inline]
    pub fn get_mut(&mut self, id: Id) -> Option<&mut T> {
        match self
            .entries
            .get_mut(id.0.index())
            .expect("id from different slab")
        {
            Entry::Occupied(x) => Some(x),
            Entry::Free { .. } => None,
        }
    }

    /// Does this slab contain an allocated value for `id`?
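    ///
    /// # Example
    ///
    /// A small sketch of checking liveness across a deallocation:
    ///
    /// ```
    /// use wasmtime_internal_slab::Slab;
    ///
    /// let mut slab = Slab::new();
    /// let id = slab.alloc("x");
    /// assert!(slab.contains(id));
    ///
    /// // Once deallocated, the entry is vacant and no longer contained.
    /// slab.dealloc(id);
    /// assert!(!slab.contains(id));
    /// ```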
    #[inline]
    pub fn contains(&self, id: Id) -> bool {
        match self.entries.get(id.0.index()) {
            Some(Entry::Occupied(_)) => true,
            None | Some(Entry::Free { .. }) => false,
        }
    }

    /// Deallocate the value associated with the given `id`, returning the
    /// value that was stored there.
    ///
    /// If `id` comes from a different `Slab` instance, this method may panic
    /// or deallocate an arbitrary value.
    #[inline]
    pub fn dealloc(&mut self, id: Id) -> T {
        let entry = core::mem::replace(
            self.entries
                .get_mut(id.0.index())
                .expect("id from a different slab"),
            Entry::Free { next_free: None },
        );
        match entry {
            Entry::Free { .. } => panic!("attempt to deallocate an entry that is already vacant"),
            Entry::Occupied(value) => {
                // Push this entry onto the front of the free list.
                let next_free = core::mem::replace(&mut self.free, Some(id.0));
                self.entries[id.0.index()] = Entry::Free { next_free };
                self.len -= 1;
                value
            }
        }
    }

    /// Iterate over all values currently allocated within this `Slab`.
    ///
    /// Yields pairs of an `Id` and the `Id`'s associated value.
    ///
    /// Iteration order is undefined.
    #[inline]
    pub fn iter(&self) -> impl Iterator<Item = (Id, &T)> + '_ {
        assert!(self.entries.len() <= Self::MAX_CAPACITY);
        self.entries
            .iter()
            .enumerate()
            .filter_map(|(i, e)| match e {
                Entry::Occupied(x) => Some((Id(EntryIndex::new(i)), x)),
                Entry::Free { .. } => None,
            })
    }

    /// Mutably iterate over all values currently allocated within this `Slab`.
    ///
    /// Yields pairs of an `Id` and the `Id`'s associated value.
    ///
    /// Iteration order is undefined.
    #[inline]
    pub fn iter_mut(&mut self) -> impl Iterator<Item = (Id, &mut T)> + '_ {
        assert!(self.entries.len() <= Self::MAX_CAPACITY);
        self.entries
            .iter_mut()
            .enumerate()
            .filter_map(|(i, e)| match e {
                Entry::Occupied(x) => Some((Id(EntryIndex::new(i)), x)),
                Entry::Free { .. } => None,
            })
    }

    /// Iterate over and remove all entries in this slab.
    ///
    /// The slab will be empty after calling this method.
    ///
    /// Yields pairs of an `Id` and the `Id`'s associated value.
    ///
    /// Iteration order is undefined.
    #[inline]
    pub fn drain(&mut self) -> impl Iterator<Item = (Id, T)> + '_ {
        assert!(self.entries.len() <= Self::MAX_CAPACITY);
        self.len = 0;
        self.free = None;
        self.entries
            .drain(..)
            .enumerate()
            .filter_map(|(i, e)| match e {
                Entry::Occupied(x) => Some((Id(EntryIndex::new(i)), x)),
                Entry::Free { .. } => None,
            })
    }
}
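
#[cfg(test)]
mod tests {
    use super::*;

    // A small sanity test of the free-list behavior described in the module
    // docs: deallocating pushes an entry onto the free list, and the next
    // allocation reuses that entry (this is also the ABA caveat documented
    // above).
    #[test]
    fn dealloc_then_alloc_reuses_entry() {
        let mut slab = Slab::new();
        let a = slab.alloc("a");
        let b = slab.alloc("b");

        slab.dealloc(a);
        let c = slab.alloc("c");

        // The freed entry was reused, so the new id equals the old one.
        assert_eq!(a, c);
        assert_eq!(slab[b], "b");
        assert_eq!(slab[c], "c");
    }
}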