//! A very simple, uniformly-typed slab arena that supports deallocation and
//! reusing deallocated entries' space.
//!
//! > **⚠️ Warning ⚠️**: this crate is an internal-only crate for the Wasmtime
//! > project and is not intended for general use. APIs are not strictly
//! > reviewed for safety, and usage outside of Wasmtime may have bugs. If
//! > you're interested in using this, feel free to file an issue on the
//! > Wasmtime repository to start a discussion about doing so, but otherwise
//! > be aware that your usage of this crate is not supported.
//!
//! The free list of vacant entries in the slab is stored inline in the slab's
//! existing storage.
//!
//! # Example
//!
//! ```
//! # fn foo() -> wasmtime_internal_core::error::Result<()> {
//! use wasmtime_internal_core::slab::Slab;
//!
//! let mut slab = Slab::new();
//!
//! // Insert some values into the slab.
//! let rza = slab.alloc("Robert Fitzgerald Diggs")?;
//! let gza = slab.alloc("Gary Grice")?;
//! let bill = slab.alloc("Bill Gates")?;
//!
//! // Allocated elements can be accessed infallibly via indexing (though
//! // indexing a missing or deallocated entry will panic).
//! assert_eq!(slab[rza], "Robert Fitzgerald Diggs");
//!
//! // Alternatively, the `get` and `get_mut` methods provide fallible lookup.
//! if let Some(genius) = slab.get(gza) {
//!     println!("The gza gza genius: {}", genius);
//! }
//! if let Some(val) = slab.get_mut(bill) {
//!     *val = "Bill Gates doesn't belong in this set...";
//! }
//!
//! // We can remove values from the slab.
//! slab.dealloc(bill);
//!
//! // Allocate a new entry.
//! let bill = slab.alloc("Bill Murray")?;
//! # Ok(())
//! # }
//! # foo().unwrap();
//! ```
//!
//! # Using `Id`s with the Wrong `Slab`
//!
//! `Slab` does NOT check that `Id`s used to access previously-allocated values
//! came from the current `Slab` instance (as opposed to a different `Slab`
//! instance). Using `Id`s from a different `Slab` is safe, but will yield an
//! unrelated value, if any at all.
//!
//! If you desire checking that an `Id` came from the correct `Slab` instance,
//! it should be easy to layer that functionality on top of this crate by
//! wrapping `Slab` and `Id` in types that additionally maintain a slab instance
//! identifier.
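//!
//! For example, something like the following sketch (the `CheckedSlab`
//! wrapper and its global counter are hypothetical, not part of this crate):
//!
//! ```rust
//! use core::sync::atomic::{AtomicU32, Ordering};
//! use wasmtime_internal_core::error::OutOfMemory;
//! use wasmtime_internal_core::slab::{Id, Slab};
//!
//! // Hypothetical global counter used to give each slab a unique identifier.
//! static NEXT_SLAB_ID: AtomicU32 = AtomicU32::new(0);
//!
//! pub struct CheckedId {
//!     id: Id,
//!     slab_id: u32,
//! }
//!
//! pub struct CheckedSlab<T> {
//!     slab: Slab<T>,
//!     slab_id: u32,
//! }
//!
//! impl<T> CheckedSlab<T> {
//!     pub fn new() -> Self {
//!         CheckedSlab {
//!             slab: Slab::new(),
//!             slab_id: NEXT_SLAB_ID.fetch_add(1, Ordering::Relaxed),
//!         }
//!     }
//!
//!     pub fn alloc(&mut self, value: T) -> Result<CheckedId, OutOfMemory> {
//!         let id = self.slab.alloc(value)?;
//!         Ok(CheckedId { id, slab_id: self.slab_id })
//!     }
//!
//!     pub fn get(&self, id: CheckedId) -> Option<&T> {
//!         // Check that the id came from this slab instance. (Alternatively,
//!         // return `None` instead of panicking.)
//!         assert_eq!(id.slab_id, self.slab_id);
//!         self.slab.get(id.id)
//!     }
//! }
//! ```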
//!
//! # The ABA Problem
//!
//! This `Slab` type does NOT protect against ABA bugs, such as the following
//! sequence:
//!
//! * Value `A` is allocated into the slab, yielding id `i`.
//!
//! * `A` is deallocated, and so `i`'s associated entry is added to the slab's
//!   free list.
//!
//! * Value `B` is allocated into the slab, reusing `i`'s associated entry,
//!   yielding id `i`.
//!
//! * The "original" id `i` is used to access the arena, expecting the
//!   deallocated value `A`, but getting the new value `B`.
//!
//! That is, it does not detect or prevent the memory-safe version of
//! use-after-free bugs.
//!
//! If you need to protect against ABA bugs, it should be easy to layer that
//! functionality on top of this crate by wrapping `Slab` with something like
//! the following:
//!
//! ```rust
//! use wasmtime_internal_core::error::OutOfMemory;
//!
//! pub struct GenerationalId {
//!     id: wasmtime_internal_core::slab::Id,
//!     generation: u32,
//! }
//!
//! struct GenerationalEntry<T> {
//!     value: T,
//!     generation: u32,
//! }
//!
//! pub struct GenerationalSlab<T> {
//!     slab: wasmtime_internal_core::slab::Slab<GenerationalEntry<T>>,
//!     generation: u32,
//! }
//!
//! impl<T> GenerationalSlab<T> {
//!     pub fn alloc(&mut self, value: T) -> Result<GenerationalId, OutOfMemory> {
//!         let generation = self.generation;
//!         let id = self.slab.alloc(GenerationalEntry { value, generation })?;
//!         Ok(GenerationalId { id, generation })
//!     }
//!
//!     pub fn get(&self, id: GenerationalId) -> Option<&T> {
//!         let entry = self.slab.get(id.id)?;
//!
//!         // Check that the entry's generation matches the id's generation,
//!         // else we have an ABA bug. (Alternatively, return `None` instead
//!         // of panicking.)
//!         assert_eq!(id.generation, entry.generation);
//!
//!         Some(&entry.value)
//!     }
//!
//!     pub fn dealloc(&mut self, id: GenerationalId) {
//!         // Check that the entry's generation matches the id's generation,
//!         // else we have an ABA bug. (Alternatively, silently return on
//!         // double-free instead of panicking.)
//!         assert_eq!(id.generation, self.slab[id.id].generation);
//!
//!         self.slab.dealloc(id.id);
//!
//!         // Increment our generation whenever we deallocate so that any new
//!         // value placed in this same entry will have a different generation
//!         // and we can detect ABA bugs.
//!         self.generation += 1;
//!     }
//! }
//! ```

use crate::alloc::Vec;
use crate::error::OutOfMemory;
use core::fmt;
use core::num::NonZeroU32;

/// An identifier for an allocated value inside a `Slab`.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct Id(EntryIndex);

impl fmt::Debug for Id {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("Id").field(&self.0.index()).finish()
    }
}

impl Id {
    /// Get the raw underlying representation of this `Id`.
    #[inline]
    pub fn into_raw(self) -> u32 {
        u32::try_from(self.0.index()).unwrap()
    }

    /// Construct an `Id` from its raw underlying representation.
    ///
    /// `raw` should be a value that was previously created via
    /// `Id::into_raw`. May panic if given arbitrary values.
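    ///
    /// A round-trip sketch (illustrative only):
    ///
    /// ```
    /// # fn foo() -> wasmtime_internal_core::error::Result<()> {
    /// use wasmtime_internal_core::slab::{Id, Slab};
    ///
    /// let mut slab = Slab::new();
    /// let id = slab.alloc("hello")?;
    ///
    /// // An `Id` survives a round trip through its raw representation.
    /// let raw = id.into_raw();
    /// assert_eq!(slab[Id::from_raw(raw)], "hello");
    /// # Ok(())
    /// # }
    /// # foo().unwrap();
    /// ```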
    #[inline]
    pub fn from_raw(raw: u32) -> Self {
        let raw = usize::try_from(raw).unwrap();
        Self(EntryIndex::new(raw))
    }
}

/// A simple, uni-typed slab arena.
pub struct Slab<T> {
    /// The slab's entries: each is either occupied, holding a `T`, or vacant,
    /// in which case it is a link in the free list.
    entries: Vec<Entry<T>>,

    /// The index of the first free entry in the free list.
    free: Option<EntryIndex>,

    /// The number of occupied entries in this slab.
    len: u32,
}

impl<T> fmt::Debug for Slab<T>
where
    T: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_map().entries(self.iter()).finish()
    }
}

enum Entry<T> {
    /// An occupied entry holding a `T`.
    Occupied(T),

    /// A vacant entry.
    Free {
        /// A link in the slab's free list, pointing to the next free entry, if
        /// any.
        next_free: Option<EntryIndex>,
    },
}

/// The index of an entry within a slab's `entries` vector.
///
/// Stored as `index + 1` in a `NonZeroU32` so that `Option<EntryIndex>` is
/// the same size as a bare `u32`.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
struct EntryIndex(NonZeroU32);

impl EntryIndex {
    #[inline]
    fn new(index: usize) -> Self {
        assert!(index <= Slab::<()>::MAX_CAPACITY);
        let x = u32::try_from(index + 1).unwrap();
        Self(NonZeroU32::new(x).unwrap())
    }

    #[inline]
    fn index(&self) -> usize {
        let index = self.0.get() - 1;
        usize::try_from(index).unwrap()
    }
}

impl<T> Default for Slab<T> {
    #[inline]
    fn default() -> Self {
        Self {
            entries: Vec::default(),
            free: None,
            len: 0,
        }
    }
}

impl<T> core::ops::Index<Id> for Slab<T> {
    type Output = T;

    #[inline]
    fn index(&self, id: Id) -> &Self::Output {
        self.get(id)
            .expect("id from different slab or value was deallocated")
    }
}

impl<T> core::ops::IndexMut<Id> for Slab<T> {
    #[inline]
    fn index_mut(&mut self, id: Id) -> &mut Self::Output {
        self.get_mut(id)
            .expect("id from different slab or value was deallocated")
    }
}

impl<T> Slab<T> {
    /// The maximum capacity any `Slab` can have: `u32::MAX - 1`.
    pub const MAX_CAPACITY: usize = (u32::MAX - 1) as usize;

    /// Construct a new, empty slab.
    #[inline]
    pub fn new() -> Self {
        Slab::default()
    }

    /// Construct a new, empty slab, pre-reserving space for at least `capacity`
    /// elements.
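    ///
    /// For example (illustrative):
    ///
    /// ```
    /// # fn foo() -> wasmtime_internal_core::error::Result<()> {
    /// use wasmtime_internal_core::slab::Slab;
    ///
    /// let slab = Slab::<u32>::with_capacity(16)?;
    /// assert!(slab.capacity() >= 16);
    /// assert!(slab.is_empty());
    /// # Ok(())
    /// # }
    /// # foo().unwrap();
    /// ```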
    #[inline]
    pub fn with_capacity(capacity: usize) -> Result<Self, OutOfMemory> {
        let mut slab = Self::new();
        slab.reserve(capacity)?;
        Ok(slab)
    }

    /// Ensure that there is space for at least `additional` elements in this
    /// slab.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `Self::MAX_CAPACITY`.
    pub fn reserve(&mut self, additional: usize) -> Result<(), OutOfMemory> {
        let cap = self.capacity();
        let len = self.len();
        assert!(cap >= len);
        if cap - len >= additional {
            // Already have `additional` capacity available.
            return Ok(());
        }

        self.entries.reserve(additional)?;

        // Maintain the invariant that `i <= MAX_CAPACITY` for all indices `i`
        // in `self.entries`.
        assert!(self.entries.capacity() <= Self::MAX_CAPACITY);

        Ok(())
    }

    fn double_capacity(&mut self) -> Result<(), OutOfMemory> {
        // Double our capacity to amortize the cost of resizing. But make sure
        // we add some amount of minimum additional capacity, since doubling
        // zero capacity isn't useful.
        const MIN_CAPACITY: usize = 16;
        let additional = core::cmp::max(self.entries.capacity(), MIN_CAPACITY);
        self.reserve(additional)
    }

    /// What is the capacity of this slab? That is, how many entries can it
    /// contain within its current underlying storage?
    #[inline]
    pub fn capacity(&self) -> usize {
        self.entries.capacity()
    }

    /// How many values are currently allocated within this slab?
    #[inline]
    pub fn len(&self) -> usize {
        usize::try_from(self.len).unwrap()
    }

    /// Are there zero allocated values within this slab?
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Try to allocate a `T` value within this slab.
    ///
    /// If there is no available capacity, ownership of the given value is
    /// returned via `Err(value)`.
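    ///
    /// A usage sketch (illustrative; assumes a freshly-created slab starts
    /// with zero capacity):
    ///
    /// ```
    /// # fn foo() -> wasmtime_internal_core::error::Result<()> {
    /// use wasmtime_internal_core::slab::Slab;
    ///
    /// let mut slab = Slab::new();
    ///
    /// // A fresh slab has no capacity yet, so the value is handed back.
    /// assert_eq!(slab.try_alloc("hello"), Err("hello"));
    ///
    /// // After reserving space, `try_alloc` succeeds.
    /// slab.reserve(1)?;
    /// let id = slab.try_alloc("hello").ok().unwrap();
    /// assert_eq!(slab[id], "hello");
    /// # Ok(())
    /// # }
    /// # foo().unwrap();
    /// ```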
    #[inline]
    pub fn try_alloc(&mut self, value: T) -> Result<Id, T> {
        if let Some(index) = self.try_alloc_index() {
            let next_free = match self.entries[index.index()] {
                Entry::Free { next_free } => next_free,
                Entry::Occupied { .. } => unreachable!(),
            };
            self.free = next_free;
            self.entries[index.index()] = Entry::Occupied(value);
            self.len += 1;
            Ok(Id(index))
        } else {
            Err(value)
        }
    }

    #[inline]
    fn try_alloc_index(&mut self) -> Option<EntryIndex> {
        self.free.take().or_else(|| {
            if self.entries.len() < self.entries.capacity() {
                let index = EntryIndex::new(self.entries.len());
                self.entries
                    .push(Entry::Free { next_free: None })
                    .expect("have capacity");
                Some(index)
            } else {
                None
            }
        })
    }

    /// Allocate a `T` value within this slab, allocating additional underlying
    /// storage if there is no available capacity.
    ///
    /// # Panics
    ///
    /// Panics if allocating this value requires reallocating the underlying
    /// storage, and the new capacity exceeds `Slab::MAX_CAPACITY`.
    #[inline]
    pub fn alloc(&mut self, value: T) -> Result<Id, OutOfMemory> {
        self.try_alloc(value)
            .or_else(|value| self.alloc_slow(value))
    }

    /// Get the `Id` that will be returned for the next allocation in this slab.
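    ///
    /// For example (illustrative):
    ///
    /// ```
    /// # fn foo() -> wasmtime_internal_core::error::Result<()> {
    /// use wasmtime_internal_core::slab::Slab;
    ///
    /// let mut slab = Slab::new();
    /// let expected = slab.next_id();
    /// let actual = slab.alloc("value")?;
    /// assert_eq!(expected, actual);
    /// # Ok(())
    /// # }
    /// # foo().unwrap();
    /// ```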
    #[inline]
    pub fn next_id(&self) -> Id {
        let index = self.free.unwrap_or_else(|| EntryIndex::new(self.len()));
        Id(index)
    }

    #[inline(never)]
    #[cold]
    fn alloc_slow(&mut self, value: T) -> Result<Id, OutOfMemory> {
        // Reserve additional capacity, since we didn't have space for the
        // allocation.
        self.double_capacity()?;
        // After which the allocation will succeed.
        let id = self.try_alloc(value).ok().unwrap();
        Ok(id)
    }

    /// Get a shared borrow of the value associated with `id`.
    ///
    /// Returns `None` if the value has since been deallocated.
    ///
    /// If `id` comes from a different `Slab` instance, this method may panic,
    /// return `None`, or return an arbitrary value.
    #[inline]
    pub fn get(&self, id: Id) -> Option<&T> {
        match self
            .entries
            .get(id.0.index())
            .expect("id from different slab")
        {
            Entry::Occupied(x) => Some(x),
            Entry::Free { .. } => None,
        }
    }

    /// Get an exclusive borrow of the value associated with `id`.
    ///
    /// Returns `None` if the value has since been deallocated.
    ///
    /// If `id` comes from a different `Slab` instance, this method may panic,
    /// return `None`, or return an arbitrary value.
    #[inline]
    pub fn get_mut(&mut self, id: Id) -> Option<&mut T> {
        match self
            .entries
            .get_mut(id.0.index())
            .expect("id from different slab")
        {
            Entry::Occupied(x) => Some(x),
            Entry::Free { .. } => None,
        }
    }

    /// Does this slab contain an allocated value for `id`?
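    ///
    /// For example (illustrative):
    ///
    /// ```
    /// # fn foo() -> wasmtime_internal_core::error::Result<()> {
    /// use wasmtime_internal_core::slab::Slab;
    ///
    /// let mut slab = Slab::new();
    /// let id = slab.alloc(42)?;
    /// assert!(slab.contains(id));
    ///
    /// slab.dealloc(id);
    /// assert!(!slab.contains(id));
    /// # Ok(())
    /// # }
    /// # foo().unwrap();
    /// ```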
    #[inline]
    pub fn contains(&self, id: Id) -> bool {
        match self.entries.get(id.0.index()) {
            Some(Entry::Occupied(_)) => true,
            None | Some(Entry::Free { .. }) => false,
        }
    }

    /// Deallocate the value associated with the given `id`, returning it.
    ///
    /// If `id` comes from a different `Slab` instance, this method may panic
    /// or deallocate an arbitrary value.
    ///
    /// # Panics
    ///
    /// Panics if `id`'s associated entry is already vacant.
    #[inline]
    pub fn dealloc(&mut self, id: Id) -> T {
        let entry = core::mem::replace(
            self.entries
                .get_mut(id.0.index())
                .expect("id from a different slab"),
            Entry::Free { next_free: None },
        );
        match entry {
            Entry::Free { .. } => panic!("attempt to deallocate an entry that is already vacant"),
            Entry::Occupied(value) => {
                let next_free = core::mem::replace(&mut self.free, Some(id.0));
                self.entries[id.0.index()] = Entry::Free { next_free };
                self.len -= 1;
                value
            }
        }
    }

    /// Iterate over all values currently allocated within this `Slab`.
    ///
    /// Yields pairs of an `Id` and the `Id`'s associated value.
    ///
    /// Iteration order is undefined.
    #[inline]
    pub fn iter(&self) -> impl Iterator<Item = (Id, &T)> + '_ {
        assert!(self.entries.len() <= Self::MAX_CAPACITY);
        self.entries
            .iter()
            .enumerate()
            .filter_map(|(i, e)| match e {
                Entry::Occupied(x) => Some((Id(EntryIndex::new(i)), x)),
                Entry::Free { .. } => None,
            })
    }
473
474
/// Mutably iterate over all values currently allocated within this `Slab`.
475
///
476
/// Yields pairs of an `Id` and the `Id`'s associated value.
477
///
478
/// Iteration order is undefined.
479
#[inline]
480
pub fn iter_mut(&mut self) -> impl Iterator<Item = (Id, &mut T)> + '_ {
481
assert!(self.entries.len() <= Self::MAX_CAPACITY);
482
self.entries
483
.iter_mut()
484
.enumerate()
485
.filter_map(|(i, e)| match e {
486
Entry::Occupied(x) => Some((Id(EntryIndex::new(i)), x)),
487
Entry::Free { .. } => None,
488
})
489
}

    /// Iterate over and remove all entries in this slab.
    ///
    /// The slab will be empty after calling this method.
    ///
    /// Yields pairs of an `Id` and the `Id`'s associated value.
    ///
    /// Iteration order is undefined.
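    ///
    /// For example (illustrative):
    ///
    /// ```
    /// # fn foo() -> wasmtime_internal_core::error::Result<()> {
    /// use wasmtime_internal_core::slab::Slab;
    ///
    /// let mut slab = Slab::new();
    /// slab.alloc("a")?;
    /// slab.alloc("b")?;
    ///
    /// assert_eq!(slab.drain().count(), 2);
    /// assert!(slab.is_empty());
    /// # Ok(())
    /// # }
    /// # foo().unwrap();
    /// ```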
    #[inline]
    pub fn drain(&mut self) -> impl Iterator<Item = (Id, T)> + '_ {
        assert!(self.entries.len() <= Self::MAX_CAPACITY);
        self.len = 0;
        self.free = None;
        self.entries
            .drain(..)
            .enumerate()
            .filter_map(|(i, e)| match e {
                Entry::Occupied(x) => Some((Id(EntryIndex::new(i)), x)),
                Entry::Free { .. } => None,
            })
    }
}