//! A very simple, uniformly-typed slab arena that supports deallocation and
//! reusing deallocated entries' space.
//!
//! > **⚠️ Warning ⚠️**: this crate is an internal-only crate for the Wasmtime
//! > project and is not intended for general use. APIs are not strictly
//! > reviewed for safety and usage outside of Wasmtime may have bugs. If
//! > you're interested in using this feel free to file an issue on the
//! > Wasmtime repository to start a discussion about doing so, but otherwise
//! > be aware that your usage of this crate is not supported.
//!
//! The free list of vacant entries in the slab is stored inline in the slab's
//! existing storage.
//!
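//! Conceptually, each entry in the slab's backing storage is either occupied,
//! holding a live `T`, or vacant, holding the index of the next vacant entry.
//! A simplified sketch of the `Entry<T>` type defined later in this module:
//!
//! ```ignore
//! enum Entry<T> {
//!     Occupied(T),
//!     Free { next_free: Option<EntryIndex> },
//! }
//! ```
//!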
//! # Example
//!
//! ```
//! use wasmtime_internal_slab::{Id, Slab};
//!
//! let mut slab = Slab::new();
//!
//! // Insert some values into the slab.
//! let rza = slab.alloc("Robert Fitzgerald Diggs");
//! let gza = slab.alloc("Gary Grice");
//! let bill = slab.alloc("Bill Gates");
//!
//! // Allocated elements can be accessed infallibly via indexing (and accessing
//! // missing or deallocated entries will panic).
//! assert_eq!(slab[rza], "Robert Fitzgerald Diggs");
//!
//! // Alternatively, the `get` and `get_mut` methods provide fallible lookup.
//! if let Some(genius) = slab.get(gza) {
//!     println!("The gza gza genius: {}", genius);
//! }
//! if let Some(val) = slab.get_mut(bill) {
//!     *val = "Bill Gates doesn't belong in this set...";
//! }
//!
//! // We can remove values from the slab.
//! slab.dealloc(bill);
//!
//! // Allocate a new entry.
//! let bill = slab.alloc("Bill Murray");
//! ```
//!
//! # Using `Id`s with the Wrong `Slab`
//!
//! `Slab` does NOT check that `Id`s used to access previously-allocated values
//! came from the current `Slab` instance (as opposed to a different `Slab`
//! instance). Using `Id`s from a different `Slab` is safe, but will yield an
//! unrelated value, if any at all.
//!
//! If you desire checking that an `Id` came from the correct `Slab` instance,
//! it should be easy to layer that functionality on top of this crate by
//! wrapping `Slab` and `Id` in types that additionally maintain a slab instance
//! identifier.
//!
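//! For example, one could imagine something like the following (a minimal,
//! illustrative sketch; `CheckedSlab`, `CheckedId`, and `NEXT_SLAB_ID` are
//! hypothetical names, not part of this crate):
//!
//! ```rust
//! use core::sync::atomic::{AtomicU32, Ordering};
//!
//! /// A process-wide counter for handing out unique slab identifiers.
//! static NEXT_SLAB_ID: AtomicU32 = AtomicU32::new(0);
//!
//! #[derive(Clone, Copy)]
//! pub struct CheckedId {
//!     id: wasmtime_internal_slab::Id,
//!     slab_id: u32,
//! }
//!
//! pub struct CheckedSlab<T> {
//!     slab: wasmtime_internal_slab::Slab<T>,
//!     slab_id: u32,
//! }
//!
//! impl<T> CheckedSlab<T> {
//!     pub fn new() -> Self {
//!         CheckedSlab {
//!             slab: wasmtime_internal_slab::Slab::new(),
//!             slab_id: NEXT_SLAB_ID.fetch_add(1, Ordering::Relaxed),
//!         }
//!     }
//!
//!     pub fn alloc(&mut self, value: T) -> CheckedId {
//!         // Stamp each `Id` with the identifier of the slab that minted it.
//!         CheckedId {
//!             id: self.slab.alloc(value),
//!             slab_id: self.slab_id,
//!         }
//!     }
//!
//!     pub fn get(&self, id: CheckedId) -> Option<&T> {
//!         // Reject ids minted by a different slab instance.
//!         assert_eq!(id.slab_id, self.slab_id);
//!         self.slab.get(id.id)
//!     }
//! }
//! ```
//!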
//! # The ABA Problem
//!
//! This `Slab` type does NOT protect against ABA bugs, such as the following
//! sequence:
//!
//! * Value `A` is allocated into the slab, yielding id `i`.
//!
//! * `A` is deallocated, and so `i`'s associated entry is added to the slab's
//!   free list.
//!
//! * Value `B` is allocated into the slab, reusing `i`'s associated entry,
//!   yielding id `i`.
//!
//! * The "original" id `i` is used to access the arena, expecting the
//!   deallocated value `A`, but getting the new value `B`.
//!
//! That is, it does not detect or prevent the memory-safe version of
//! use-after-free bugs.
//!
//! If you need to protect against ABA bugs, it should be easy to layer that
//! functionality on top of this crate by wrapping `Slab` with something like
//! the following:
//!
//! ```rust
//! pub struct GenerationalId {
//!     id: wasmtime_internal_slab::Id,
//!     generation: u32,
//! }
//!
//! struct GenerationalEntry<T> {
//!     value: T,
//!     generation: u32,
//! }
//!
//! pub struct GenerationalSlab<T> {
//!     slab: wasmtime_internal_slab::Slab<GenerationalEntry<T>>,
//!     generation: u32,
//! }
//!
//! impl<T> GenerationalSlab<T> {
//!     pub fn alloc(&mut self, value: T) -> GenerationalId {
//!         let generation = self.generation;
//!         let id = self.slab.alloc(GenerationalEntry { value, generation });
//!         GenerationalId { id, generation }
//!     }
//!
//!     pub fn get(&self, id: GenerationalId) -> Option<&T> {
//!         let entry = self.slab.get(id.id)?;
//!
//!         // Check that the entry's generation matches the id's generation,
//!         // else we have an ABA bug. (Alternatively, return `None` instead
//!         // of panicking.)
//!         assert_eq!(id.generation, entry.generation);
//!
//!         Some(&entry.value)
//!     }
//!
//!     pub fn dealloc(&mut self, id: GenerationalId) {
//!         // Check that the entry's generation matches the id's generation,
//!         // else we have an ABA bug. (Alternatively, silently return on
//!         // double-free instead of panicking.)
//!         assert_eq!(id.generation, self.slab[id.id].generation);
//!
//!         self.slab.dealloc(id.id);
//!
//!         // Increment our generation whenever we deallocate so that any new
//!         // value placed in this same entry will have a different generation
//!         // and we can detect ABA bugs.
//!         self.generation += 1;
//!     }
//! }
//! ```

#![no_std]
#![forbid(unsafe_code)]
#![deny(missing_docs, missing_debug_implementations)]

extern crate alloc;

use alloc::vec::Vec;
use core::fmt;
use core::num::NonZeroU32;

/// An identifier for an allocated value inside a `Slab`.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct Id(EntryIndex);

impl fmt::Debug for Id {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("Id").field(&self.0.index()).finish()
    }
}

impl Id {
    /// Get the raw underlying representation of this `Id`.
    #[inline]
    pub fn into_raw(self) -> u32 {
        u32::try_from(self.0.index()).unwrap()
    }

    /// Construct an `Id` from its raw underlying representation.
    ///
    /// `raw` should be a value that was previously created via
    /// `Id::into_raw`. May panic if given arbitrary values.
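    ///
    /// A raw id round-trips through these two methods; a small illustrative
    /// example:
    ///
    /// ```
    /// use wasmtime_internal_slab::{Id, Slab};
    ///
    /// let mut slab = Slab::new();
    /// let id = slab.alloc("hello");
    /// assert_eq!(Id::from_raw(id.into_raw()), id);
    /// ```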
    #[inline]
    pub fn from_raw(raw: u32) -> Self {
        let raw = usize::try_from(raw).unwrap();
        Self(EntryIndex::new(raw))
    }
}

/// A simple, uniformly-typed slab arena.
pub struct Slab<T> {
    /// The slab's entries; each is either occupied and holding a `T`, or
    /// vacant and a link in the free list.
    entries: Vec<Entry<T>>,

    /// The index of the first free entry in the free list.
    free: Option<EntryIndex>,

    /// The number of occupied entries in this slab.
    len: u32,
}

impl<T> fmt::Debug for Slab<T>
where
    T: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_map().entries(self.iter()).finish()
    }
}

enum Entry<T> {
    /// An occupied entry holding a `T`.
    Occupied(T),

    /// A vacant entry.
    Free {
        /// A link in the slab's free list, pointing to the next free entry,
        /// if any.
        next_free: Option<EntryIndex>,
    },
}

/// A one-based index into a slab's `entries`, stored as a `NonZeroU32` so
/// that `Option<EntryIndex>` is no larger than a bare `u32`: `new` adds one
/// to the zero-based index and `index` subtracts it back out.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
struct EntryIndex(NonZeroU32);

impl EntryIndex {
    #[inline]
    fn new(index: usize) -> Self {
        assert!(index <= Slab::<()>::MAX_CAPACITY);
        let x = u32::try_from(index + 1).unwrap();
        Self(NonZeroU32::new(x).unwrap())
    }

    #[inline]
    fn index(&self) -> usize {
        let index = self.0.get() - 1;
        usize::try_from(index).unwrap()
    }
}

impl<T> Default for Slab<T> {
    #[inline]
    fn default() -> Self {
        Self {
            entries: Vec::default(),
            free: None,
            len: 0,
        }
    }
}

impl<T> core::ops::Index<Id> for Slab<T> {
    type Output = T;

    #[inline]
    fn index(&self, id: Id) -> &Self::Output {
        self.get(id)
            .expect("id from different slab or value was deallocated")
    }
}

impl<T> core::ops::IndexMut<Id> for Slab<T> {
    #[inline]
    fn index_mut(&mut self, id: Id) -> &mut Self::Output {
        self.get_mut(id)
            .expect("id from different slab or value was deallocated")
    }
}

impl<T> Slab<T> {
    /// The maximum capacity any `Slab` can have: `u32::MAX - 1`.
    pub const MAX_CAPACITY: usize = (u32::MAX - 1) as usize;

    /// Construct a new, empty slab.
    #[inline]
    pub fn new() -> Self {
        Slab::default()
    }

    /// Construct a new, empty slab, pre-reserving space for at least `capacity`
    /// elements.
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        let mut slab = Self::new();
        slab.reserve(capacity);
        slab
    }

    /// Ensure that there is space for at least `additional` elements in this
    /// slab.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `Self::MAX_CAPACITY`.
    pub fn reserve(&mut self, additional: usize) {
        let cap = self.capacity();
        let len = self.len();
        assert!(cap >= len);
        if cap - len >= additional {
            // Already have `additional` capacity available.
            return;
        }

        self.entries.reserve(additional);

        // Maintain the invariant that `i <= MAX_CAPACITY` for all indices `i`
        // in `self.entries`.
        assert!(self.entries.capacity() <= Self::MAX_CAPACITY);
    }

    fn double_capacity(&mut self) {
        // Double our capacity to amortize the cost of resizing. But make sure
        // we add some amount of minimum additional capacity, since doubling
        // zero capacity isn't useful.
        const MIN_CAPACITY: usize = 16;
        let additional = core::cmp::max(self.entries.capacity(), MIN_CAPACITY);
        self.reserve(additional);
    }

    /// What is the capacity of this slab? That is, how many entries can it
    /// contain within its current underlying storage?
    #[inline]
    pub fn capacity(&self) -> usize {
        self.entries.capacity()
    }

    /// How many values are currently allocated within this slab?
    #[inline]
    pub fn len(&self) -> usize {
        usize::try_from(self.len).unwrap()
    }

    /// Are there zero allocated values within this slab?
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Try to allocate a `T` value within this slab.
    ///
    /// If there is no available capacity, ownership of the given value is
    /// returned via `Err(value)`.
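    ///
    /// A short illustration (`Slab::new` starts with zero capacity):
    ///
    /// ```
    /// use wasmtime_internal_slab::Slab;
    ///
    /// let mut slab = Slab::new();
    /// // No capacity yet, so ownership of the value comes back in the `Err`.
    /// assert_eq!(slab.try_alloc("hello"), Err("hello"));
    ///
    /// // After reserving space, allocation succeeds.
    /// slab.reserve(1);
    /// assert!(slab.try_alloc("hello").is_ok());
    /// ```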
    #[inline]
    pub fn try_alloc(&mut self, value: T) -> Result<Id, T> {
        if let Some(index) = self.try_alloc_index() {
            let next_free = match self.entries[index.index()] {
                Entry::Free { next_free } => next_free,
                Entry::Occupied { .. } => unreachable!(),
            };
            self.free = next_free;
            self.entries[index.index()] = Entry::Occupied(value);
            self.len += 1;
            Ok(Id(index))
        } else {
            Err(value)
        }
    }

    #[inline]
    fn try_alloc_index(&mut self) -> Option<EntryIndex> {
        self.free.take().or_else(|| {
            if self.entries.len() < self.entries.capacity() {
                let index = EntryIndex::new(self.entries.len());
                self.entries.push(Entry::Free { next_free: None });
                Some(index)
            } else {
                None
            }
        })
    }

    /// Allocate a `T` value within this slab, allocating additional underlying
    /// storage if there is no available capacity.
    ///
    /// # Panics
    ///
    /// Panics if allocating this value requires reallocating the underlying
    /// storage, and the new capacity exceeds `Slab::MAX_CAPACITY`.
    #[inline]
    pub fn alloc(&mut self, value: T) -> Id {
        self.try_alloc(value)
            .unwrap_or_else(|value| self.alloc_slow(value))
    }

    /// Get the `Id` that will be returned for the next allocation in this slab.
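    ///
    /// A small illustrative example:
    ///
    /// ```
    /// use wasmtime_internal_slab::Slab;
    ///
    /// let mut slab = Slab::new();
    /// let expected = slab.next_id();
    /// assert_eq!(slab.alloc("value"), expected);
    /// ```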
367
#[inline]
368
pub fn next_id(&self) -> Id {
369
let index = self.free.unwrap_or_else(|| EntryIndex::new(self.len()));
370
Id(index)
371
}
372
373
#[inline(never)]
374
#[cold]
375
fn alloc_slow(&mut self, value: T) -> Id {
376
// Reserve additional capacity, since we didn't have space for the
377
// allocation.
378
self.double_capacity();
379
// After which the allocation will succeed.
380
self.try_alloc(value).ok().unwrap()
381
}

    /// Get a shared borrow of the value associated with `id`.
    ///
    /// Returns `None` if the value has since been deallocated.
    ///
    /// If `id` comes from a different `Slab` instance, this method may panic,
    /// return `None`, or return an arbitrary value.
    #[inline]
    pub fn get(&self, id: Id) -> Option<&T> {
        match self
            .entries
            .get(id.0.index())
            .expect("id from different slab")
        {
            Entry::Occupied(x) => Some(x),
            Entry::Free { .. } => None,
        }
    }

    /// Get an exclusive borrow of the value associated with `id`.
    ///
    /// Returns `None` if the value has since been deallocated.
    ///
    /// If `id` comes from a different `Slab` instance, this method may panic,
    /// return `None`, or return an arbitrary value.
    #[inline]
    pub fn get_mut(&mut self, id: Id) -> Option<&mut T> {
        match self
            .entries
            .get_mut(id.0.index())
            .expect("id from different slab")
        {
            Entry::Occupied(x) => Some(x),
            Entry::Free { .. } => None,
        }
    }

    /// Does this slab contain an allocated value for `id`?
    #[inline]
    pub fn contains(&self, id: Id) -> bool {
        match self.entries.get(id.0.index()) {
            Some(Entry::Occupied(_)) => true,
            None | Some(Entry::Free { .. }) => false,
        }
    }

    /// Deallocate the value associated with the given `id`, returning the
    /// deallocated value.
    ///
    /// If `id` comes from a different `Slab` instance, this method may panic
    /// or deallocate an arbitrary value.
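    ///
    /// For example:
    ///
    /// ```
    /// use wasmtime_internal_slab::Slab;
    ///
    /// let mut slab = Slab::new();
    /// let id = slab.alloc("value");
    /// // Deallocation returns the value; the entry is now vacant.
    /// assert_eq!(slab.dealloc(id), "value");
    /// assert!(!slab.contains(id));
    /// ```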
    #[inline]
    pub fn dealloc(&mut self, id: Id) -> T {
        let entry = core::mem::replace(
            self.entries
                .get_mut(id.0.index())
                .expect("id from a different slab"),
            Entry::Free { next_free: None },
        );
        match entry {
            Entry::Free { .. } => panic!("attempt to deallocate an entry that is already vacant"),
            Entry::Occupied(value) => {
                // Push this entry onto the front of the free list.
                let next_free = core::mem::replace(&mut self.free, Some(id.0));
                self.entries[id.0.index()] = Entry::Free { next_free };
                self.len -= 1;
                value
            }
        }
    }

    /// Iterate over all values currently allocated within this `Slab`.
    ///
    /// Yields pairs of an `Id` and the `Id`'s associated value.
    ///
    /// Iteration order is undefined.
    #[inline]
    pub fn iter(&self) -> impl Iterator<Item = (Id, &T)> + '_ {
        assert!(self.entries.len() <= Self::MAX_CAPACITY);
        self.entries
            .iter()
            .enumerate()
            .filter_map(|(i, e)| match e {
                Entry::Occupied(x) => Some((Id(EntryIndex::new(i)), x)),
                Entry::Free { .. } => None,
            })
    }

    /// Mutably iterate over all values currently allocated within this `Slab`.
    ///
    /// Yields pairs of an `Id` and the `Id`'s associated value.
    ///
    /// Iteration order is undefined.
    #[inline]
    pub fn iter_mut(&mut self) -> impl Iterator<Item = (Id, &mut T)> + '_ {
        assert!(self.entries.len() <= Self::MAX_CAPACITY);
        self.entries
            .iter_mut()
            .enumerate()
            .filter_map(|(i, e)| match e {
                Entry::Occupied(x) => Some((Id(EntryIndex::new(i)), x)),
                Entry::Free { .. } => None,
            })
    }

    /// Iterate over and remove all entries in this slab.
    ///
    /// The slab will be empty after calling this method.
    ///
    /// Yields pairs of an `Id` and the `Id`'s associated value.
    ///
    /// Iteration order is undefined.
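    ///
    /// For example:
    ///
    /// ```
    /// use wasmtime_internal_slab::Slab;
    ///
    /// let mut slab = Slab::new();
    /// slab.alloc(1);
    /// slab.alloc(2);
    ///
    /// let drained: Vec<_> = slab.drain().map(|(_id, value)| value).collect();
    /// assert_eq!(drained.len(), 2);
    /// assert!(slab.is_empty());
    /// ```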
492
#[inline]
493
pub fn drain(&mut self) -> impl Iterator<Item = (Id, T)> + '_ {
494
assert!(self.entries.len() <= Self::MAX_CAPACITY);
495
self.len = 0;
496
self.free = None;
497
self.entries
498
.drain(..)
499
.enumerate()
500
.filter_map(|(i, e)| match e {
501
Entry::Occupied(x) => Some((Id(EntryIndex::new(i)), x)),
502
Entry::Free { .. } => None,
503
})
504
}
505
}
506
507