GitHub Repository: google/crosvm
Path: blob/main/ext2/src/arena.rs

// Copyright 2024 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Defines an arena allocator backed by `base::MemoryMapping`.

use std::cell::RefCell;
use std::collections::BTreeSet;
use std::fs::File;

use anyhow::anyhow;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use base::MappedRegion;
use base::MemoryMapping;
use zerocopy::FromBytes;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct Region {
    start: usize,
    len: usize,
}

/// Manages a set of non-overlapping regions, merging adjacent ones.
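///
/// For example (illustrative only; this mirrors the behavior exercised by
/// `test_region_manager` below):
///
/// - `allocate(0, 5)` -> regions: `[0, 5)`
/// - `allocate(5, 5)` -> regions: `[0, 10)` (adjacent regions are merged)
/// - `allocate(3, 5)` -> error (overlaps `[0, 10)`)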
#[derive(Default)]
struct RegionManager(BTreeSet<Region>);

impl RegionManager {
    fn allocate(&mut self, start: usize, len: usize) -> Result<()> {
        // Allocation must fail if there exists a region that overlaps with [start, start+len).
        // A region r overlaps with [start, start+len) if and only if:
        //   r.start < (start+len) && start < (r.start+r.len)
        // When equality holds instead, the regions are adjacent and get merged.
        //
        // So, we first find the last region where r.start < (start+len) holds.
        let left = self
            .0
            .range(
                ..Region {
                    start: start + len,
                    len: 0,
                },
            )
            .next_back()
            .copied();

        // New region to be added.
        let new = match left {
            None => Region { start, len },
            Some(r) => {
                if start < r.start + r.len {
                    bail!(
                        "range overlaps: existing: {:?}, new: {:?}",
                        left,
                        Region { start, len }
                    );
                }

                // If `r` and the new region are adjacent, merge them;
                // otherwise, just return the new region.
                if start == r.start + r.len {
                    let new = Region {
                        start: r.start,
                        len: r.len + len,
                    };
                    self.0.remove(&r);
                    new
                } else {
                    Region { start, len }
                }
            }
        };

        // If there exists a region that starts at `new.start + new.len`,
        // it should be merged with `new`.
        let right = self
            .0
            .range(
                Region {
                    start: new.start + new.len,
                    len: 0,
                }..,
            )
            .next()
            .copied();
        match right {
            Some(r) if r.start == new.start + new.len => {
                // Merge and insert.
                let merged = Region {
                    start: new.start,
                    len: new.len + r.len,
                };
                self.0.remove(&r);
                self.0.insert(merged);
            }
            Some(_) | None => {
                // Just insert.
                self.0.insert(new);
            }
        }

        Ok(())
    }

    #[cfg(test)]
    fn to_vec(&self) -> Vec<&Region> {
        self.0.iter().collect()
    }
}

#[test]
fn test_region_manager() {
    let mut rm: RegionManager = Default::default();

    rm.allocate(0, 5).unwrap();
    assert_eq!(rm.to_vec(), vec![&Region { start: 0, len: 5 }]);
    rm.allocate(10, 5).unwrap();
    rm.allocate(15, 5).unwrap(); // will be merged into the previous one
    assert_eq!(
        rm.to_vec(),
        vec![&Region { start: 0, len: 5 }, &Region { start: 10, len: 10 }]
    );
    rm.allocate(3, 5).unwrap_err(); // fails: overlaps [0, 5)
    rm.allocate(8, 5).unwrap_err(); // fails: overlaps [10, 20)

    rm.allocate(25, 5).unwrap();
    assert_eq!(
        rm.to_vec(),
        vec![
            &Region { start: 0, len: 5 },
            &Region { start: 10, len: 10 },
            &Region { start: 25, len: 5 }
        ]
    );

    rm.allocate(5, 5).unwrap(); // will be merged with the two existing neighbors
    assert_eq!(
        rm.to_vec(),
        vec![&Region { start: 0, len: 20 }, &Region { start: 25, len: 5 }]
    );
    rm.allocate(20, 5).unwrap();
    assert_eq!(rm.to_vec(), vec![&Region { start: 0, len: 30 }]);
}

/// Represents an ID of a disk block.
#[derive(Debug, Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
#[repr(C)]
pub struct BlockId(u32);

impl From<u32> for BlockId {
    fn from(value: u32) -> Self {
        BlockId(value)
    }
}

impl From<BlockId> for u32 {
    fn from(value: BlockId) -> Self {
        value.0
    }
}

impl BlockId {
    pub fn as_bytes(&self) -> &[u8] {
        self.0.as_bytes()
    }
}

/// Information on how to mmap a host file to ext2 blocks.
pub struct FileMappingInfo {
    /// Offset in the memory that a file is mapped to.
    pub mem_offset: usize,
    /// The file to be mmap'd.
    pub file: File,
    /// The length of the mapping.
    pub length: usize,
    /// Offset in the file to start the mapping.
    pub file_offset: usize,
}

/// Memory arena backed by `base::MemoryMapping`.
///
/// This struct takes a mutable reference to the memory mapping, so this arena
/// has exclusive access to the mapped region for its lifetime.
pub struct Arena<'a> {
    mem: &'a mut MemoryMapping,
    block_size: usize,
    /// The set of non-overlapping regions reserved so far.
    /// Use `RefCell` for interior mutability because the mutability of `RegionManager` should be
    /// independent from the mutability of the memory mapping.
    regions: RefCell<RegionManager>,

    mappings: RefCell<Vec<FileMappingInfo>>,
}

impl<'a> Arena<'a> {
    /// Creates a new arena backed by the given `base::MemoryMapping`.
    pub fn new(block_size: usize, mem: &'a mut MemoryMapping) -> Result<Self> {
        Ok(Self {
            mem,
            block_size,
            regions: Default::default(),
            mappings: Default::default(),
        })
    }

    /// A helper function to mark a region as reserved.
    fn reserve(&self, mem_offset: usize, len: usize) -> Result<()> {
        let mem_end = mem_offset.checked_add(len).context("mem_end overflow")?;

        if mem_end > self.mem.size() {
            bail!(
                "out of memory region: {mem_offset} + {len} > {}",
                self.mem.size()
            );
        }

        self.regions.borrow_mut().allocate(mem_offset, len)?;

        Ok(())
    }

    /// Reserves a region for mmap and stores the mmap information.
    /// Note that `Arena` will not call mmap() itself. Instead, the owner of the
    /// `Arena` instance must call `into_mapping_info()` to retrieve the mapping
    /// information and perform the mmap later.
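    ///
    /// A sketch of the intended flow (illustrative only, not compiled as a
    /// doctest; `arena` and `file` are assumed to exist):
    ///
    /// ```ignore
    /// arena.reserve_for_mmap(4096, 8192, file, 0)?;
    /// for info in arena.into_mapping_info() {
    ///     // The owner mmaps `info.file` at `info.mem_offset` here.
    /// }
    /// ```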
228
pub fn reserve_for_mmap(
229
&self,
230
mem_offset: usize,
231
length: usize,
232
file: File,
233
file_offset: usize,
234
) -> Result<()> {
235
self.reserve(mem_offset, length)?;
236
self.mappings.borrow_mut().push(FileMappingInfo {
237
mem_offset,
238
length,
239
file: file.try_clone()?,
240
file_offset,
241
});
242
243
Ok(())
244
}

    /// Allocates a new slice in anonymous memory.
    /// `Arena` guarantees that this area does not overlap with any other region.
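    ///
    /// A usage sketch (illustrative only, not compiled as a doctest):
    ///
    /// ```ignore
    /// // Reserve 16 bytes at the start of block 2 and get a mutable view of them.
    /// let buf: &mut [u8] = arena.allocate_slice(BlockId::from(2), 0, 16)?;
    /// buf.fill(0);
    /// ```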
248
pub fn allocate_slice(
249
&self,
250
block: BlockId,
251
block_offset: usize,
252
len: usize,
253
) -> Result<&'a mut [u8]> {
254
let offset = u32::from(block) as usize * self.block_size + block_offset;
255
self.reserve(offset, len)?;
256
257
let new_addr = (self.mem.as_ptr() as usize)
258
.checked_add(offset)
259
.context("address overflow")?;
260
261
// SAFETY: the memory region [new_addr, new_addr+len) is guaranteed to be valid.
262
let slice = unsafe { std::slice::from_raw_parts_mut(new_addr as *mut u8, len) };
263
Ok(slice)
264
}

    /// Allocates a new region for a value of type `T` and returns a mutable
    /// reference to it, backed by the arena's memory.
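    ///
    /// A usage sketch (illustrative only, not compiled as a doctest; `mem` is
    /// assumed to be a valid `base::MemoryMapping`):
    ///
    /// ```ignore
    /// let arena = Arena::new(4096, &mut mem)?;
    /// let v: &mut u32 = arena.allocate::<u32>(BlockId::from(1), 8)?;
    /// *v = 0xdead_beef; // writes into block 1 at offset 8
    /// ```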
    pub fn allocate<T: FromBytes + IntoBytes + KnownLayout>(
        &self,
        block: BlockId,
        block_offset: usize,
    ) -> Result<&'a mut T> {
        let slice = self.allocate_slice(block, block_offset, std::mem::size_of::<T>())?;
        T::mut_from_bytes(slice).map_err(|_| anyhow!("failed to interpret"))
    }

    /// Writes `value` at the given block and offset, reserving the region first.
    pub fn write_to_mem<T: IntoBytes + Immutable>(
        &self,
        block_id: BlockId,
        block_offset: usize,
        value: &T,
    ) -> Result<()> {
        let slice = self.allocate_slice(block_id, block_offset, std::mem::size_of::<T>())?;
        slice.copy_from_slice(value.as_bytes());
        Ok(())
    }

    /// Consumes the `Arena` and retrieves the stored mmap information.
    pub fn into_mapping_info(self) -> Vec<FileMappingInfo> {
        self.mappings.take()
    }
}
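
// A minimal end-to-end usage sketch (not part of the original file). It assumes
// `base::MemoryMappingBuilder` from crosvm's `base` crate builds an anonymous
// mapping of the requested size; treat this as an illustration rather than a
// definitive test.
#[cfg(test)]
mod arena_usage_sketch {
    use super::*;
    use base::MemoryMappingBuilder;

    #[test]
    fn arena_basic_flow() {
        const BLOCK_SIZE: usize = 4096;
        // Assumption: MemoryMappingBuilder::new(len).build() yields an anonymous mapping.
        let mut mem = MemoryMappingBuilder::new(BLOCK_SIZE * 4).build().unwrap();
        let arena = Arena::new(BLOCK_SIZE, &mut mem).unwrap();

        // Writing a u32 into block 0 at offset 0 reserves [0, 4).
        arena.write_to_mem(BlockId::from(0), 0, &42u32).unwrap();

        // Reserving the same bytes again must fail: regions never overlap.
        assert!(arena.allocate::<u32>(BlockId::from(0), 0).is_err());

        // No reserve_for_mmap() calls were made, so there is nothing to mmap.
        assert!(arena.into_mapping_info().is_empty());
    }
}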