//! A very simple, uniformly-typed slab arena that supports deallocation and
//! reusing deallocated entries' space.
//!
//! > **⚠️ Warning ⚠️**: this crate is an internal-only crate for the Wasmtime
//! > project and is not intended for general use. APIs are not strictly
//! > reviewed for safety and usage outside of Wasmtime may have bugs. If
//! > you're interested in using this feel free to file an issue on the
//! > Wasmtime repository to start a discussion about doing so, but otherwise
//! > be aware that your usage of this crate is not supported.
//!
//! The free list of vacant entries in the slab are stored inline in the slab's
//! existing storage.
//!
//! # Example
//!
//! ```
//! # fn foo() -> wasmtime_internal_core::error::Result<()> {
//! use wasmtime_internal_core::slab::Slab;
//!
//! let mut slab = Slab::new();
//!
//! // Insert some values into the slab.
//! let rza = slab.alloc("Robert Fitzgerald Diggs")?;
//! let gza = slab.alloc("Gary Grice")?;
//! let bill = slab.alloc("Bill Gates")?;
//!
//! // Allocated elements can be accessed infallibly via indexing (and missing and
//! // deallocated entries will panic).
//! assert_eq!(slab[rza], "Robert Fitzgerald Diggs");
//!
//! // Alternatively, the `get` and `get_mut` methods provide fallible lookup.
//! if let Some(genius) = slab.get(gza) {
//!     println!("The gza gza genius: {}", genius);
//! }
//! if let Some(val) = slab.get_mut(bill) {
//!     *val = "Bill Gates doesn't belong in this set...";
//! }
//!
//! // We can remove values from the slab.
//! slab.dealloc(bill);
//!
//! // Allocate a new entry.
//! let bill = slab.alloc("Bill Murray")?;
//! # Ok(())
//! # }
//! # foo().unwrap();
//! ```
//!
//! # Using `Id`s with the Wrong `Slab`
//!
//! `Slab` does NOT check that `Id`s used to access previously-allocated values
//! came from the current `Slab` instance (as opposed to a different `Slab`
//! instance). Using `Id`s from a different `Slab` is safe, but will yield an
//! unrelated value, if any at all.
//!
//! If you desire checking that an `Id` came from the correct `Slab` instance,
//! it should be easy to layer that functionality on top of this crate by
//! wrapping `Slab` and `Id` in types that additionally maintain a slab instance
//! identifier.
//!
//! # The ABA Problem
//!
//! This `Slab` type does NOT protect against ABA bugs, such as the following
//! sequence:
//!
//! * Value `A` is allocated into the slab, yielding id `i`.
//!
//! * `A` is deallocated, and so `i`'s associated entry is added to the slab's
//!   free list.
//!
//! * Value `B` is allocated into the slab, reusing `i`'s associated entry,
//!   yielding id `i`.
//!
//! * The "original" id `i` is used to access the arena, expecting the
//!   deallocated value `A`, but getting the new value `B`.
//!
//! That is, it does not detect or prevent the memory-safe version of
//! use-after-free bugs.
//!
//! If you need to protect against ABA bugs, it should be easy to layer that
//! functionality on top of this crate by wrapping `Slab` with something like
//! the following:
//!
//! ```rust
//! use wasmtime_internal_core::error::OutOfMemory;
//!
//! pub struct GenerationalId {
//!     id: wasmtime_internal_core::slab::Id,
//!     generation: u32,
//! }
//!
//! struct GenerationalEntry<T> {
//!     value: T,
//!     generation: u32,
//! }
//!
//! pub struct GenerationalSlab<T> {
//!     slab: wasmtime_internal_core::slab::Slab<GenerationalEntry<T>>,
//!     generation: u32,
//! }
//!
//! impl<T> GenerationalSlab<T> {
//!     pub fn alloc(&mut self, value: T) -> Result<GenerationalId, OutOfMemory> {
//!         let generation = self.generation;
//!         let id = self.slab.alloc(GenerationalEntry { value, generation })?;
//!         Ok(GenerationalId { id, generation })
//!     }
//!
//!     pub fn get(&self, id: GenerationalId) -> Option<&T> {
let entry = self.slab.get(id.id)?;110//!111//! // Check that the entry's generation matches the id's generation,112//! // else we have an ABA bug. (Alternatively, return `None` instead113//! // of panicking.)114//! assert_eq!(id.generation, entry.generation);115//!116//! Some(&entry.value)117//! }118//!119//! pub fn dealloc(&mut self, id: GenerationalId) {120//! // Check that the entry's generation matches the id's generation,121//! // else we have an ABA bug. (Alternatively, silently return on122//! // double-free instead of panicking.)123//! assert_eq!(id.generation, self.slab[id.id].generation);124//!125//! self.slab.dealloc(id.id);126//!127//! // Increment our generation whenever we deallocate so that any new128//! // value placed in this same entry will have a different generation129//! // and we can detect ABA bugs.130//! self.generation += 1;131//! }132//! }133//! ```134135use crate::alloc::Vec;136use crate::error::OutOfMemory;137use core::fmt;138use core::num::NonZeroU32;139140/// An identifier for an allocated value inside a `slab`.141#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]142#[repr(transparent)]143pub struct Id(EntryIndex);144145impl fmt::Debug for Id {146fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {147f.debug_tuple("Id").field(&self.0.index()).finish()148}149}150151impl Id {152/// Get the raw underlying representation of this `Id`.153#[inline]154pub fn into_raw(self) -> u32 {155u32::try_from(self.0.index()).unwrap()156}157158/// Construct an `Id` from its raw underlying representation.159///160/// `raw` should be a value that was previously created via161/// `Id::into_raw`. 
May panic if given arbitrary values.162#[inline]163pub fn from_raw(raw: u32) -> Self {164let raw = usize::try_from(raw).unwrap();165Self(EntryIndex::new(raw))166}167}168169/// A simple, uni-typed slab arena.170pub struct Slab<T> {171/// The slab's entries, each is either occupied and holding a `T` or vacant172/// and is a link the free list.173entries: Vec<Entry<T>>,174175/// The index of the first free entry in the free list.176free: Option<EntryIndex>,177178/// The number of occupied entries is this slab.179len: u32,180}181182impl<T> fmt::Debug for Slab<T>183where184T: fmt::Debug,185{186fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {187f.debug_map().entries(self.iter()).finish()188}189}190191enum Entry<T> {192/// An occupied entry holding a `T`.193Occupied(T),194195/// A vacant entry.196Free {197/// A link in the slab's free list, pointing to the next free entry, if198/// any.199next_free: Option<EntryIndex>,200},201}202203#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]204#[repr(transparent)]205struct EntryIndex(NonZeroU32);206207impl EntryIndex {208#[inline]209fn new(index: usize) -> Self {210assert!(index <= Slab::<()>::MAX_CAPACITY);211let x = u32::try_from(index + 1).unwrap();212Self(NonZeroU32::new(x).unwrap())213}214215#[inline]216fn index(&self) -> usize {217let index = self.0.get() - 1;218usize::try_from(index).unwrap()219}220}221222impl<T> Default for Slab<T> {223#[inline]224fn default() -> Self {225Self {226entries: Vec::default(),227free: None,228len: 0,229}230}231}232233impl<T> core::ops::Index<Id> for Slab<T> {234type Output = T;235236#[inline]237fn index(&self, id: Id) -> &Self::Output {238self.get(id)239.expect("id from different slab or value was deallocated")240}241}242243impl<T> core::ops::IndexMut<Id> for Slab<T> {244#[inline]245fn index_mut(&mut self, id: Id) -> &mut Self::Output {246self.get_mut(id)247.expect("id from different slab or value was deallocated")248}249}250251impl<T> Slab<T> {252/// The maximum capacity 
any `Slab` can have: `u32::MAX - 1`.253pub const MAX_CAPACITY: usize = (u32::MAX - 1) as usize;254255/// Construct a new, empty slab.256#[inline]257pub fn new() -> Self {258Slab::default()259}260261/// Construct a new, empty slab, pre-reserving space for at least `capacity`262/// elements.263#[inline]264pub fn with_capacity(capacity: usize) -> Result<Self, OutOfMemory> {265let mut slab = Self::new();266slab.reserve(capacity)?;267Ok(slab)268}269270/// Ensure that there is space for at least `additional` elements in this271/// slab.272///273/// # Panics274///275/// Panics if the new capacity exceeds `Self::MAX_CAPACITY`.276pub fn reserve(&mut self, additional: usize) -> Result<(), OutOfMemory> {277let cap = self.capacity();278let len = self.len();279assert!(cap >= len);280if cap - len >= additional {281// Already have `additional` capacity available.282return Ok(());283}284285self.entries.reserve(additional)?;286287// Maintain the invariant that `i <= MAX_CAPACITY` for all indices `i`288// in `self.entries`.289assert!(self.entries.capacity() <= Self::MAX_CAPACITY);290291Ok(())292}293294fn double_capacity(&mut self) -> Result<(), OutOfMemory> {295// Double our capacity to amortize the cost of resizing. But make sure296// we add some amount of minimum additional capacity, since doubling297// zero capacity isn't useful.298const MIN_CAPACITY: usize = 16;299let additional = core::cmp::max(self.entries.capacity(), MIN_CAPACITY);300self.reserve(additional)301}302303/// What is the capacity of this slab? 
That is, how many entries can it304/// contain within its current underlying storage?305#[inline]306pub fn capacity(&self) -> usize {307self.entries.capacity()308}309310/// How many values are currently allocated within this slab?311#[inline]312pub fn len(&self) -> usize {313usize::try_from(self.len).unwrap()314}315316/// Are there zero allocated values within this slab?317#[inline]318pub fn is_empty(&self) -> bool {319self.len() == 0320}321322/// Try to allocate a `T` value within this slab.323///324/// If there is no available capacity, ownership of the given value is325/// returned via `Err(value)`.326#[inline]327pub fn try_alloc(&mut self, value: T) -> Result<Id, T> {328if let Some(index) = self.try_alloc_index() {329let next_free = match self.entries[index.index()] {330Entry::Free { next_free } => next_free,331Entry::Occupied { .. } => unreachable!(),332};333self.free = next_free;334self.entries[index.index()] = Entry::Occupied(value);335self.len += 1;336Ok(Id(index))337} else {338Err(value)339}340}341342#[inline]343fn try_alloc_index(&mut self) -> Option<EntryIndex> {344self.free.take().or_else(|| {345if self.entries.len() < self.entries.capacity() {346let index = EntryIndex::new(self.entries.len());347self.entries348.push(Entry::Free { next_free: None })349.expect("have capacity");350Some(index)351} else {352None353}354})355}356357/// Allocate a `T` value within this slab, allocating additional underlying358/// storage if there is no available capacity.359///360/// # Panics361///362/// Panics if allocating this value requires reallocating the underlying363/// storage, and the new capacity exceeds `Slab::MAX_CAPACITY`.364#[inline]365pub fn alloc(&mut self, value: T) -> Result<Id, OutOfMemory> {366self.try_alloc(value)367.or_else(|value| self.alloc_slow(value))368}369370/// Get the `Id` that will be returned for the next allocation in this slab.371#[inline]372pub fn next_id(&self) -> Id {373let index = self.free.unwrap_or_else(|| 
EntryIndex::new(self.len()));374Id(index)375}376377#[inline(never)]378#[cold]379fn alloc_slow(&mut self, value: T) -> Result<Id, OutOfMemory> {380// Reserve additional capacity, since we didn't have space for the381// allocation.382self.double_capacity()?;383// After which the allocation will succeed.384let id = self.try_alloc(value).ok().unwrap();385Ok(id)386}387388/// Get a shared borrow of the value associated with `id`.389///390/// Returns `None` if the value has since been deallocated.391///392/// If `id` comes from a different `Slab` instance, this method may panic,393/// return `None`, or return an arbitrary value.394#[inline]395pub fn get(&self, id: Id) -> Option<&T> {396match self397.entries398.get(id.0.index())399.expect("id from different slab")400{401Entry::Occupied(x) => Some(x),402Entry::Free { .. } => None,403}404}405406/// Get an exclusive borrow of the value associated with `id`.407///408/// Returns `None` if the value has since been deallocated.409///410/// If `id` comes from a different `Slab` instance, this method may panic,411/// return `None`, or return an arbitrary value.412#[inline]413pub fn get_mut(&mut self, id: Id) -> Option<&mut T> {414match self415.entries416.get_mut(id.0.index())417.expect("id from different slab")418{419Entry::Occupied(x) => Some(x),420Entry::Free { .. } => None,421}422}423424/// Does this slab contain an allocated value for `id`?425#[inline]426pub fn contains(&self, id: Id) -> bool {427match self.entries.get(id.0.index()) {428Some(Entry::Occupied(_)) => true,429None | Some(Entry::Free { .. 
}) => false,430}431}432433/// Deallocate the value associated with the given `id`.434///435/// If `id` comes from a different `Slab` instance, this method may panic or436/// deallocate an arbitrary value.437#[inline]438pub fn dealloc(&mut self, id: Id) -> T {439let entry = core::mem::replace(440self.entries441.get_mut(id.0.index())442.expect("id from a different slab"),443Entry::Free { next_free: None },444);445match entry {446Entry::Free { .. } => panic!("attempt to deallocate an entry that is already vacant"),447Entry::Occupied(value) => {448let next_free = core::mem::replace(&mut self.free, Some(id.0));449self.entries[id.0.index()] = Entry::Free { next_free };450self.len -= 1;451value452}453}454}455456/// Iterate over all values currently allocated within this `Slab`.457///458/// Yields pairs of an `Id` and the `Id`'s associated value.459///460/// Iteration order is undefined.461#[inline]462pub fn iter(&self) -> impl Iterator<Item = (Id, &T)> + '_ {463assert!(self.entries.len() <= Self::MAX_CAPACITY);464self.entries465.iter()466.enumerate()467.filter_map(|(i, e)| match e {468Entry::Occupied(x) => Some((Id(EntryIndex::new(i)), x)),469Entry::Free { .. } => None,470})471}472473/// Mutably iterate over all values currently allocated within this `Slab`.474///475/// Yields pairs of an `Id` and the `Id`'s associated value.476///477/// Iteration order is undefined.478#[inline]479pub fn iter_mut(&mut self) -> impl Iterator<Item = (Id, &mut T)> + '_ {480assert!(self.entries.len() <= Self::MAX_CAPACITY);481self.entries482.iter_mut()483.enumerate()484.filter_map(|(i, e)| match e {485Entry::Occupied(x) => Some((Id(EntryIndex::new(i)), x)),486Entry::Free { .. 
} => None,487})488}489490/// Iterate over and remove all entries in this slab.491///492/// The slab will be empty after calling this method.493///494/// Yields pairs of an `Id` and the `Id`'s associated value.495///496/// Iteration order is undefined.497#[inline]498pub fn drain(&mut self) -> impl Iterator<Item = (Id, T)> + '_ {499assert!(self.entries.len() <= Self::MAX_CAPACITY);500self.len = 0;501self.free = None;502self.entries503.drain(..)504.enumerate()505.filter_map(|(i, e)| match e {506Entry::Occupied(x) => Some((Id(EntryIndex::new(i)), x)),507Entry::Free { .. } => None,508})509}510}511512513