Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
google
GitHub Repository: google/crosvm
Path: blob/main/disk/src/composite.rs
5394 views
1
// Copyright 2019 The ChromiumOS Authors
2
// Use of this source code is governed by a BSD-style license that can be
3
// found in the LICENSE file.
4
5
use std::cmp::max;
6
use std::cmp::min;
7
use std::collections::HashSet;
8
use std::convert::TryInto;
9
use std::fs::File;
10
use std::fs::OpenOptions;
11
use std::io;
12
use std::io::ErrorKind;
13
use std::io::Read;
14
use std::io::Seek;
15
use std::io::SeekFrom;
16
use std::io::Write;
17
use std::ops::Range;
18
use std::path::Path;
19
use std::path::PathBuf;
20
use std::sync::atomic::AtomicBool;
21
use std::sync::atomic::Ordering;
22
use std::sync::Arc;
23
24
use async_trait::async_trait;
25
use base::AsRawDescriptors;
26
use base::FileAllocate;
27
use base::FileReadWriteAtVolatile;
28
use base::FileSetLen;
29
use base::RawDescriptor;
30
use base::VolatileSlice;
31
use crc32fast::Hasher;
32
use cros_async::BackingMemory;
33
use cros_async::Executor;
34
use cros_async::MemRegionIter;
35
use protobuf::Message;
36
use protos::cdisk_spec;
37
use protos::cdisk_spec::ComponentDisk;
38
use protos::cdisk_spec::CompositeDisk;
39
use protos::cdisk_spec::ReadWriteCapability;
40
use remain::sorted;
41
use thiserror::Error;
42
use uuid::Uuid;
43
44
use crate::gpt;
45
use crate::gpt::write_gpt_header;
46
use crate::gpt::write_protective_mbr;
47
use crate::gpt::GptPartitionEntry;
48
use crate::gpt::GPT_BEGINNING_SIZE;
49
use crate::gpt::GPT_END_SIZE;
50
use crate::gpt::GPT_HEADER_SIZE;
51
use crate::gpt::GPT_NUM_PARTITIONS;
52
use crate::gpt::GPT_PARTITION_ENTRY_SIZE;
53
use crate::gpt::SECTOR_SIZE;
54
use crate::open_disk_file;
55
use crate::AsyncDisk;
56
use crate::DiskFile;
57
use crate::DiskFileParams;
58
use crate::DiskGetLen;
59
use crate::ImageType;
60
use crate::ToAsyncDisk;
61
62
/// The amount of padding needed between the last partition entry and the first partition, to align
/// the partition appropriately. The two sectors are for the MBR and the GPT header.
const PARTITION_ALIGNMENT_SIZE: usize = GPT_BEGINNING_SIZE as usize
    - 2 * SECTOR_SIZE as usize
    - GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize;
// Zero padding written after a GPT header to fill out the remainder of its sector.
const HEADER_PADDING_LENGTH: usize = SECTOR_SIZE as usize - GPT_HEADER_SIZE as usize;
// Keep all partitions 4k aligned for performance.
const PARTITION_SIZE_SHIFT: u8 = 12;

// Partition type GUIDs.
// From https://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs.
const LINUX_FILESYSTEM_GUID: Uuid = Uuid::from_u128(0x0FC63DAF_8483_4772_8E79_3D69D8477DE4);
const EFI_SYSTEM_PARTITION_GUID: Uuid = Uuid::from_u128(0xC12A7328_F81F_11D2_BA4B_00A0C93EC93B);
74
75
// `#[sorted]` (from the `remain` crate) requires the variants to stay in
// alphabetical order.
/// Errors that can occur while creating, opening, or using a composite disk.
#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    #[error("failed to use underlying disk: \"{0}\"")]
    DiskError(Box<crate::Error>),
    #[error("duplicate GPT partition label \"{0}\"")]
    DuplicatePartitionLabel(String),
    #[error("failed to write GPT header: \"{0}\"")]
    GptError(gpt::Error),
    #[error("invalid magic header for composite disk format")]
    InvalidMagicHeader,
    #[error("invalid partition path {0:?}")]
    InvalidPath(PathBuf),
    #[error("failed to parse specification proto: \"{0}\"")]
    InvalidProto(protobuf::Error),
    #[error("invalid specification: \"{0}\"")]
    InvalidSpecification(String),
    #[error("no image files for partition {0:?}")]
    NoImageFiles(PartitionInfo),
    #[error("failed to open component file \"{1}\": \"{0}\"")]
    OpenFile(io::Error, String),
    #[error("failed to read specification: \"{0}\"")]
    ReadSpecificationError(io::Error),
    #[error("Read-write partition {0:?} size is not a multiple of {multiple}.", multiple = 1 << PARTITION_SIZE_SHIFT)]
    UnalignedReadWrite(PartitionInfo),
    #[error("unknown version {0} in specification")]
    UnknownVersion(u64),
    #[error("unsupported component disk type \"{0:?}\"")]
    UnsupportedComponent(ImageType),
    #[error("failed to write composite disk header: \"{0}\"")]
    WriteHeader(io::Error),
    #[error("failed to write specification proto: \"{0}\"")]
    WriteProto(protobuf::Error),
    #[error("failed to write zero filler: \"{0}\"")]
    WriteZeroFiller(io::Error),
}
111
112
impl From<gpt::Error> for Error {
113
fn from(e: gpt::Error) -> Self {
114
Self::GptError(e)
115
}
116
}
117
118
/// Result type shorthand for composite disk operations.
pub type Result<T> = std::result::Result<T, Error>;
119
120
/// A single component file of a composite disk, mapped into the block device
/// address space visible to the guest.
#[derive(Debug)]
struct ComponentDiskPart {
    // The opened backing file for this component.
    file: Box<dyn DiskFile>,
    offset: u64, // Location in the block device visible to the guest
    length: u64, // Guest-visible length in bytes covered by this component
    file_offset: u64, // Location within the host file
    // Whether there have been any writes since the last fsync or fdatasync.
    needs_flush: AtomicBool,
}
129
130
impl ComponentDiskPart {
131
fn range(&self) -> Range<u64> {
132
self.offset..(self.offset + self.length)
133
}
134
}
135
136
/// Represents a composite virtual disk made out of multiple component files. This is described on
/// disk by a protocol buffer file that lists out the component file locations and their offsets
/// and lengths on the virtual disk. The spaces covered by the component disks must be contiguous
/// and not overlapping.
#[derive(Debug)]
pub struct CompositeDiskFile {
    // Sorted by guest offset; lengths are assigned so the parts tile the disk
    // contiguously (see `from_file`).
    component_disks: Vec<ComponentDiskPart>,
    // We keep the root composite file open so that the file lock is not dropped.
    _disk_spec_file: File,
}
146
147
// TODO(b/271381851): implement `try_clone`. It allows virtio-blk to run multiple workers.
// Empty marker impl: the composite disk's behavior comes from the
// `DiskGetLen`, `FileSetLen`, and `FileReadWriteAtVolatile` impls below.
impl DiskFile for CompositeDiskFile {}
149
150
/// Returns true when the two half-open ranges share at least one point.
fn ranges_overlap(a: &Range<u64>, b: &Range<u64>) -> bool {
    range_intersection(a, b).is_some()
}

/// Computes the overlap of two half-open ranges, or `None` when they are
/// disjoint (an empty overlap counts as no overlap).
fn range_intersection(a: &Range<u64>, b: &Range<u64>) -> Option<Range<u64>> {
    let start = a.start.max(b.start);
    let end = a.end.min(b.end);
    (start < end).then(|| start..end)
}
165
166
/// The version of the composite disk format supported by this implementation.
/// (From version 2 onward, component paths are always resolved relative to the
/// specification file; see `CompositeDiskFile::from_file`.)
const COMPOSITE_DISK_VERSION: u64 = 2;

/// A magic string placed at the beginning of a composite disk file to identify it.
pub const CDISK_MAGIC: &str = "composite_disk\x1d";
171
172
impl CompositeDiskFile {
173
fn new(mut disks: Vec<ComponentDiskPart>, disk_spec_file: File) -> Result<CompositeDiskFile> {
174
disks.sort_by(|d1, d2| d1.offset.cmp(&d2.offset));
175
for s in disks.windows(2) {
176
if s[0].offset == s[1].offset {
177
return Err(Error::InvalidSpecification(format!(
178
"Two disks at offset {}",
179
s[0].offset
180
)));
181
}
182
}
183
Ok(CompositeDiskFile {
184
component_disks: disks,
185
_disk_spec_file: disk_spec_file,
186
})
187
}
188
189
/// Set up a composite disk by reading the specification from a file. The file must consist of
190
/// the CDISK_MAGIC string followed by one binary instance of the CompositeDisk protocol
191
/// buffer. Returns an error if it could not read the file or if the specification was invalid.
192
pub fn from_file(mut file: File, params: DiskFileParams) -> Result<CompositeDiskFile> {
193
file.seek(SeekFrom::Start(0))
194
.map_err(Error::ReadSpecificationError)?;
195
let mut magic_space = [0u8; CDISK_MAGIC.len()];
196
file.read_exact(&mut magic_space[..])
197
.map_err(Error::ReadSpecificationError)?;
198
if magic_space != CDISK_MAGIC.as_bytes() {
199
return Err(Error::InvalidMagicHeader);
200
}
201
let proto: cdisk_spec::CompositeDisk =
202
Message::parse_from_reader(&mut file).map_err(Error::InvalidProto)?;
203
if proto.version > COMPOSITE_DISK_VERSION {
204
return Err(Error::UnknownVersion(proto.version));
205
}
206
let mut disks: Vec<ComponentDiskPart> = proto
207
.component_disks
208
.iter()
209
.map(|disk| {
210
let writable = !params.is_read_only
211
&& disk.read_write_capability
212
== cdisk_spec::ReadWriteCapability::READ_WRITE.into();
213
let component_path = PathBuf::from(&disk.file_path);
214
let path = if component_path.is_relative() || proto.version > 1 {
215
params.path.parent().unwrap().join(component_path)
216
} else {
217
component_path
218
};
219
220
// Note that a read-only parts of a composite disk should NOT be marked sparse,
221
// as the action of marking them sparse is a write. This may seem a little hacky,
222
// and it is; however:
223
// (a) there is not a good way to pass sparseness parameters per composite disk
224
// part (the proto does not have fields for it).
225
// (b) this override of sorts always matches the correct user intent.
226
Ok(ComponentDiskPart {
227
file: open_disk_file(DiskFileParams {
228
path: path.to_owned(),
229
is_read_only: !writable,
230
is_sparse_file: params.is_sparse_file && writable,
231
// TODO: Should pass `params.is_overlapped` through here. Needs testing.
232
is_overlapped: false,
233
is_direct: params.is_direct,
234
lock: params.lock,
235
depth: params.depth + 1,
236
})
237
.map_err(|e| Error::DiskError(Box::new(e)))?,
238
offset: disk.offset,
239
length: 0, // Assigned later
240
file_offset: disk.file_offset,
241
needs_flush: AtomicBool::new(false),
242
})
243
})
244
.collect::<Result<Vec<ComponentDiskPart>>>()?;
245
disks.sort_by(|d1, d2| d1.offset.cmp(&d2.offset));
246
for i in 0..(disks.len() - 1) {
247
let length = disks[i + 1].offset - disks[i].offset;
248
if length == 0 {
249
let text = format!("Two disks at offset {}", disks[i].offset);
250
return Err(Error::InvalidSpecification(text));
251
}
252
if let Some(disk) = disks.get_mut(i) {
253
disk.length = length;
254
} else {
255
let text = format!("Unable to set disk length {length}");
256
return Err(Error::InvalidSpecification(text));
257
}
258
}
259
if let Some(last_disk) = disks.last_mut() {
260
if proto.length <= last_disk.offset {
261
let text = format!(
262
"Full size of disk doesn't match last offset. {} <= {}",
263
proto.length, last_disk.offset
264
);
265
return Err(Error::InvalidSpecification(text));
266
}
267
last_disk.length = proto.length - last_disk.offset;
268
} else {
269
let text = format!("Unable to set last disk length to end at {}", proto.length);
270
return Err(Error::InvalidSpecification(text));
271
}
272
273
CompositeDiskFile::new(disks, file)
274
}
275
276
fn length(&self) -> u64 {
277
if let Some(disk) = self.component_disks.last() {
278
disk.offset + disk.length
279
} else {
280
0
281
}
282
}
283
284
fn disk_at_offset(&self, offset: u64) -> io::Result<&ComponentDiskPart> {
285
self.component_disks
286
.iter()
287
.find(|disk| disk.range().contains(&offset))
288
.ok_or_else(|| {
289
io::Error::new(
290
ErrorKind::InvalidData,
291
format!("no disk at offset {offset}"),
292
)
293
})
294
}
295
}
296
297
impl DiskGetLen for CompositeDiskFile {
    /// Returns the guest-visible length of the whole composite disk.
    fn get_len(&self) -> io::Result<u64> {
        Ok(self.length())
    }
}
302
303
impl FileSetLen for CompositeDiskFile {
    // Resizing is not supported: the layout is fixed by the specification file.
    fn set_len(&self, _len: u64) -> io::Result<()> {
        Err(io::Error::other("unsupported operation"))
    }
}
308
309
// Implements Read and Write targeting volatile storage for composite disks.
310
//
311
// Note that reads and writes will return early if crossing component disk boundaries.
312
// This is allowed by the read and write specifications, which only say read and write
313
// have to return how many bytes were actually read or written. Use read_exact_volatile
314
// or write_all_volatile to make sure all bytes are received/transmitted.
315
//
316
// If one of the component disks does a partial read or write, that also gets passed
317
// transparently to the parent.
318
impl FileReadWriteAtVolatile for CompositeDiskFile {
319
fn read_at_volatile(&self, slice: VolatileSlice, offset: u64) -> io::Result<usize> {
320
let cursor_location = offset;
321
let disk = self.disk_at_offset(cursor_location)?;
322
let subslice = if cursor_location + slice.size() as u64 > disk.offset + disk.length {
323
let new_size = disk.offset + disk.length - cursor_location;
324
slice
325
.sub_slice(0, new_size as usize)
326
.map_err(|e| io::Error::new(ErrorKind::InvalidData, e.to_string()))?
327
} else {
328
slice
329
};
330
disk.file
331
.read_at_volatile(subslice, cursor_location - disk.offset + disk.file_offset)
332
}
333
fn write_at_volatile(&self, slice: VolatileSlice, offset: u64) -> io::Result<usize> {
334
let cursor_location = offset;
335
let disk = self.disk_at_offset(cursor_location)?;
336
let subslice = if cursor_location + slice.size() as u64 > disk.offset + disk.length {
337
let new_size = disk.offset + disk.length - cursor_location;
338
slice
339
.sub_slice(0, new_size as usize)
340
.map_err(|e| io::Error::new(ErrorKind::InvalidData, e.to_string()))?
341
} else {
342
slice
343
};
344
345
let bytes = disk
346
.file
347
.write_at_volatile(subslice, cursor_location - disk.offset + disk.file_offset)?;
348
disk.needs_flush.store(true, Ordering::SeqCst);
349
Ok(bytes)
350
}
351
}
352
353
impl AsRawDescriptors for CompositeDiskFile {
354
fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
355
self.component_disks
356
.iter()
357
.flat_map(|d| d.file.as_raw_descriptors())
358
.collect()
359
}
360
}
361
362
/// A single component of an `AsyncCompositeDiskFile`; the async counterpart of
/// `ComponentDiskPart`.
struct AsyncComponentDiskPart {
    file: Box<dyn AsyncDisk>,
    offset: u64, // Location in the block device visible to the guest
    length: u64, // Guest-visible length in bytes covered by this component
    file_offset: u64, // Location within the host file
    // Whether there have been any writes since the last fsync or fdatasync.
    needs_flush: AtomicBool,
}
369
370
/// Async version of `CompositeDiskFile`, produced by
/// `ToAsyncDisk::to_async_disk`.
pub struct AsyncCompositeDiskFile {
    component_disks: Vec<AsyncComponentDiskPart>,
}
373
374
impl DiskGetLen for AsyncCompositeDiskFile {
    /// Returns the guest-visible length of the whole composite disk.
    fn get_len(&self) -> io::Result<u64> {
        Ok(self.length())
    }
}
379
380
impl FileSetLen for AsyncCompositeDiskFile {
    // Resizing is not supported: the layout is fixed by the specification file.
    fn set_len(&self, _len: u64) -> io::Result<()> {
        Err(io::Error::other("unsupported operation"))
    }
}
385
386
impl FileAllocate for AsyncCompositeDiskFile {
387
fn allocate(&self, offset: u64, length: u64) -> io::Result<()> {
388
let range = offset..(offset + length);
389
let disks = self
390
.component_disks
391
.iter()
392
.filter(|disk| ranges_overlap(&disk.range(), &range));
393
for disk in disks {
394
if let Some(intersection) = range_intersection(&range, &disk.range()) {
395
disk.file.allocate(
396
intersection.start - disk.offset + disk.file_offset,
397
intersection.end - intersection.start,
398
)?;
399
disk.needs_flush.store(true, Ordering::SeqCst);
400
}
401
}
402
Ok(())
403
}
404
}
405
406
impl ToAsyncDisk for CompositeDiskFile {
407
fn to_async_disk(self: Box<Self>, ex: &Executor) -> crate::Result<Box<dyn AsyncDisk>> {
408
Ok(Box::new(AsyncCompositeDiskFile {
409
component_disks: self
410
.component_disks
411
.into_iter()
412
.map(|disk| -> crate::Result<_> {
413
Ok(AsyncComponentDiskPart {
414
file: disk.file.to_async_disk(ex)?,
415
offset: disk.offset,
416
length: disk.length,
417
file_offset: disk.file_offset,
418
needs_flush: disk.needs_flush,
419
})
420
})
421
.collect::<crate::Result<Vec<_>>>()?,
422
}))
423
}
424
}
425
426
impl AsyncComponentDiskPart {
    /// The half-open guest-visible extent covered by this component.
    fn range(&self) -> Range<u64> {
        self.offset..(self.offset + self.length)
    }

    /// Marks this component as having unsynced writes.
    fn set_needs_flush(&self) {
        self.needs_flush.store(true, Ordering::SeqCst);
    }
}
435
436
impl AsyncCompositeDiskFile {
437
fn length(&self) -> u64 {
438
if let Some(disk) = self.component_disks.last() {
439
disk.offset + disk.length
440
} else {
441
0
442
}
443
}
444
445
fn disk_at_offset(&self, offset: u64) -> io::Result<&AsyncComponentDiskPart> {
446
self.component_disks
447
.iter()
448
.find(|disk| disk.range().contains(&offset))
449
.ok_or_else(|| {
450
io::Error::new(
451
ErrorKind::InvalidData,
452
format!("no disk at offset {offset}"),
453
)
454
})
455
}
456
457
fn disks_in_range<'a>(&'a self, range: &Range<u64>) -> Vec<&'a AsyncComponentDiskPart> {
458
self.component_disks
459
.iter()
460
.filter(|disk| ranges_overlap(&disk.range(), range))
461
.collect()
462
}
463
}
464
465
#[async_trait(?Send)]
impl AsyncDisk for AsyncCompositeDiskFile {
    /// Flushes every component concurrently, failing on the first error.
    async fn flush(&self) -> crate::Result<()> {
        futures::future::try_join_all(self.component_disks.iter().map(|c| c.file.flush())).await?;
        Ok(())
    }

    async fn fsync(&self) -> crate::Result<()> {
        // NOTE: The fsync implementation isn't really async, so no point in adding concurrency
        // here unless we introduce a blocking threadpool.
        for disk in self.component_disks.iter() {
            // `fetch_and(false)` atomically clears the dirty flag and returns
            // its previous value, so only components written since the last
            // successful sync are fsynced.
            if disk.needs_flush.fetch_and(false, Ordering::SeqCst) {
                if let Err(e) = disk.file.fsync().await {
                    // Restore the flag so a later fsync retries this component.
                    disk.set_needs_flush();
                    return Err(e);
                }
            }
        }
        Ok(())
    }

    async fn fdatasync(&self) -> crate::Result<()> {
        // NOTE: The fdatasync implementation isn't really async, so no point in adding concurrency
        // here unless we introduce a blocking threadpool.
        for disk in self.component_disks.iter() {
            // Same clear-then-restore-on-error flag handling as `fsync` above.
            if disk.needs_flush.fetch_and(false, Ordering::SeqCst) {
                if let Err(e) = disk.file.fdatasync().await {
                    disk.set_needs_flush();
                    return Err(e);
                }
            }
        }
        Ok(())
    }

    /// Reads into `mem`, clamped (via `take_bytes`) to the component
    /// containing `file_offset`, so a read never crosses a component boundary
    /// and may return fewer bytes than requested.
    async fn read_to_mem<'a>(
        &'a self,
        file_offset: u64,
        mem: Arc<dyn BackingMemory + Send + Sync>,
        mem_offsets: MemRegionIter<'a>,
    ) -> crate::Result<usize> {
        let disk = self
            .disk_at_offset(file_offset)
            .map_err(crate::Error::ReadingData)?;
        let remaining_disk = disk.offset + disk.length - file_offset;
        disk.file
            .read_to_mem(
                file_offset - disk.offset + disk.file_offset,
                mem,
                mem_offsets.take_bytes(remaining_disk.try_into().unwrap()),
            )
            .await
    }

    /// Writes from `mem`, clamped to the component containing `file_offset`,
    /// and marks that component dirty for the next fsync/fdatasync.
    async fn write_from_mem<'a>(
        &'a self,
        file_offset: u64,
        mem: Arc<dyn BackingMemory + Send + Sync>,
        mem_offsets: MemRegionIter<'a>,
    ) -> crate::Result<usize> {
        let disk = self
            .disk_at_offset(file_offset)
            // NOTE(review): the lookup failure is wrapped in `ReadingData` even
            // on the write path — confirm whether a write-specific variant
            // would be more appropriate.
            .map_err(crate::Error::ReadingData)?;
        let remaining_disk = disk.offset + disk.length - file_offset;
        let n = disk
            .file
            .write_from_mem(
                file_offset - disk.offset + disk.file_offset,
                mem,
                mem_offsets.take_bytes(remaining_disk.try_into().unwrap()),
            )
            .await?;
        disk.set_needs_flush();
        Ok(n)
    }

    /// Punches a hole in every component overlapping the requested range,
    /// translating guest offsets to per-file offsets.
    async fn punch_hole(&self, file_offset: u64, length: u64) -> crate::Result<()> {
        let range = file_offset..(file_offset + length);
        let disks = self.disks_in_range(&range);
        for disk in disks {
            if let Some(intersection) = range_intersection(&range, &disk.range()) {
                disk.file
                    .punch_hole(
                        intersection.start - disk.offset + disk.file_offset,
                        intersection.end - intersection.start,
                    )
                    .await?;
                disk.set_needs_flush();
            }
        }
        Ok(())
    }

    /// Writes zeroes to every component overlapping the requested range,
    /// translating guest offsets to per-file offsets.
    async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> crate::Result<()> {
        let range = file_offset..(file_offset + length);
        let disks = self.disks_in_range(&range);
        for disk in disks {
            if let Some(intersection) = range_intersection(&range, &disk.range()) {
                disk.file
                    .write_zeroes_at(
                        intersection.start - disk.offset + disk.file_offset,
                        intersection.end - intersection.start,
                    )
                    .await?;
                disk.set_needs_flush();
            }
        }
        Ok(())
    }
}
575
576
/// Information about a partition to create.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct PartitionInfo {
    /// GPT partition label (encoded as UTF-16 and padded/truncated to 36 code
    /// units; see `create_gpt_entry`).
    pub label: String,
    /// Path of the file backing this partition.
    pub path: PathBuf,
    /// Selects the GPT partition type GUID.
    pub partition_type: ImagePartitionType,
    /// Whether the partition is read-write or read-only.
    pub writable: bool,
    /// Size of the partition image in bytes.
    pub size: u64,
    /// Fixed partition GUID, or `None` to generate a random one.
    pub part_guid: Option<Uuid>,
}
586
587
impl PartitionInfo {
588
fn aligned_size(&self) -> u64 {
589
self.size.next_multiple_of(1 << PARTITION_SIZE_SHIFT)
590
}
591
}
592
593
/// The type of partition.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ImagePartitionType {
    /// A Linux filesystem data partition.
    LinuxFilesystem,
    /// An EFI system partition.
    EfiSystemPartition,
}
599
600
impl ImagePartitionType {
    /// The GPT partition type GUID corresponding to this partition type.
    fn guid(self) -> Uuid {
        match self {
            Self::LinuxFilesystem => LINUX_FILESYSTEM_GUID,
            Self::EfiSystemPartition => EFI_SYSTEM_PARTITION_GUID,
        }
    }
}
608
609
/// Write protective MBR and primary GPT table.
///
/// `partitions` is the already-serialized partition entry array and
/// `partition_entries_crc32` its CRC32 checksum. `secondary_table_offset` is
/// the byte offset of the backup GPT table at the end of the disk, and
/// `disk_size` the total virtual disk size used for the protective MBR.
fn write_beginning(
    file: &mut impl Write,
    disk_guid: Uuid,
    partitions: &[u8],
    partition_entries_crc32: u32,
    secondary_table_offset: u64,
    disk_size: u64,
) -> Result<()> {
    // Write the protective MBR to the first sector.
    write_protective_mbr(file, disk_size)?;

    // Write the GPT header, and pad out to the end of the sector.
    write_gpt_header(
        file,
        disk_guid,
        partition_entries_crc32,
        secondary_table_offset,
        false,
    )?;
    file.write_all(&[0; HEADER_PADDING_LENGTH])
        .map_err(Error::WriteHeader)?;

    // Write partition entries, including unused ones.
    file.write_all(partitions).map_err(Error::WriteHeader)?;

    // Write zeroes to align the first partition appropriately.
    file.write_all(&[0; PARTITION_ALIGNMENT_SIZE])
        .map_err(Error::WriteHeader)?;

    Ok(())
}
641
642
/// Write secondary GPT table.
///
/// Mirror image of `write_beginning`: the partition entry array comes first,
/// followed by the GPT header. The `true` argument is passed where
/// `write_beginning` passes `false` — presumably selecting the backup header
/// layout; confirm against `write_gpt_header`.
fn write_end(
    file: &mut impl Write,
    disk_guid: Uuid,
    partitions: &[u8],
    partition_entries_crc32: u32,
    secondary_table_offset: u64,
) -> Result<()> {
    // Write partition entries, including unused ones.
    file.write_all(partitions).map_err(Error::WriteHeader)?;

    // Write the GPT header, and pad out to the end of the sector.
    write_gpt_header(
        file,
        disk_guid,
        partition_entries_crc32,
        secondary_table_offset,
        true,
    )?;
    file.write_all(&[0; HEADER_PADDING_LENGTH])
        .map_err(Error::WriteHeader)?;

    Ok(())
}
666
667
/// Create the `GptPartitionEntry` for the given partition.
668
fn create_gpt_entry(partition: &PartitionInfo, offset: u64) -> GptPartitionEntry {
669
let mut partition_name: Vec<u16> = partition.label.encode_utf16().collect();
670
partition_name.resize(36, 0);
671
672
GptPartitionEntry {
673
partition_type_guid: partition.partition_type.guid(),
674
unique_partition_guid: partition.part_guid.unwrap_or(Uuid::new_v4()),
675
first_lba: offset / SECTOR_SIZE,
676
last_lba: (offset + partition.aligned_size()) / SECTOR_SIZE - 1,
677
attributes: 0,
678
partition_name: partition_name.try_into().unwrap(),
679
}
680
}
681
682
/// Create one or more `ComponentDisk` proto messages for the given partition.
683
fn create_component_disks(
684
partition: &PartitionInfo,
685
offset: u64,
686
zero_filler_path: &str,
687
) -> Result<Vec<ComponentDisk>> {
688
let aligned_size = partition.aligned_size();
689
690
let mut component_disks = vec![ComponentDisk {
691
offset,
692
file_path: partition
693
.path
694
.to_str()
695
.ok_or_else(|| Error::InvalidPath(partition.path.to_owned()))?
696
.to_string(),
697
read_write_capability: if partition.writable {
698
ReadWriteCapability::READ_WRITE.into()
699
} else {
700
ReadWriteCapability::READ_ONLY.into()
701
},
702
..ComponentDisk::new()
703
}];
704
705
if partition.size != aligned_size {
706
if partition.writable {
707
return Err(Error::UnalignedReadWrite(partition.to_owned()));
708
} else {
709
// Fill in the gap by reusing the zero filler file, because we know it is always bigger
710
// than the alignment size. Its size is 1 << PARTITION_SIZE_SHIFT (4k).
711
component_disks.push(ComponentDisk {
712
offset: offset + partition.size,
713
file_path: zero_filler_path.to_owned(),
714
read_write_capability: ReadWriteCapability::READ_ONLY.into(),
715
..ComponentDisk::new()
716
});
717
}
718
}
719
720
Ok(component_disks)
721
}
722
723
/// Create a new composite disk image containing the given partitions, and write it out to the given
/// files.
///
/// `header_file` receives the protective MBR and primary GPT, `footer_file`
/// receives the backup GPT (with alignment padding), and `output_composite`
/// receives the `CDISK_MAGIC` string followed by the serialized spec proto.
/// The `*_path` arguments are the locations those files will live at, recorded
/// as component paths in the spec.
///
/// # Errors
///
/// Fails if any path is not valid UTF-8, a partition label is duplicated, a
/// read-write partition is unaligned, or any write fails.
pub fn create_composite_disk(
    partitions: &[PartitionInfo],
    zero_filler_path: &Path,
    header_path: &Path,
    header_file: &mut impl Write,
    footer_path: &Path,
    footer_file: &mut impl Write,
    output_composite: &mut File,
) -> Result<()> {
    // All component paths are stored as strings in the proto, so they must be
    // valid UTF-8.
    let zero_filler_path = zero_filler_path
        .to_str()
        .ok_or_else(|| Error::InvalidPath(zero_filler_path.to_owned()))?
        .to_string();
    let header_path = header_path
        .to_str()
        .ok_or_else(|| Error::InvalidPath(header_path.to_owned()))?
        .to_string();
    let footer_path = footer_path
        .to_str()
        .ok_or_else(|| Error::InvalidPath(footer_path.to_owned()))?
        .to_string();

    let mut composite_proto = CompositeDisk::new();
    composite_proto.version = COMPOSITE_DISK_VERSION;
    // The header (MBR + primary GPT) is the first component, at offset 0.
    composite_proto.component_disks.push(ComponentDisk {
        file_path: header_path,
        offset: 0,
        read_write_capability: ReadWriteCapability::READ_ONLY.into(),
        ..ComponentDisk::new()
    });

    // Write partitions to a temporary buffer so that we can calculate the CRC, and construct the
    // ComponentDisk proto messages at the same time.
    let mut partitions_buffer =
        [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
    let mut writer: &mut [u8] = &mut partitions_buffer;
    let mut next_disk_offset = GPT_BEGINNING_SIZE;
    let mut labels = HashSet::with_capacity(partitions.len());
    for partition in partitions {
        let gpt_entry = create_gpt_entry(partition, next_disk_offset);
        // Labels are compared in their encoded (UTF-16, padded) form.
        if !labels.insert(gpt_entry.partition_name) {
            return Err(Error::DuplicatePartitionLabel(partition.label.clone()));
        }
        gpt_entry.write_bytes(&mut writer)?;

        for component_disk in
            create_component_disks(partition, next_disk_offset, &zero_filler_path)?
        {
            composite_proto.component_disks.push(component_disk);
        }

        next_disk_offset += partition.aligned_size();
    }
    // The secondary GPT needs to be at the very end of the file, but its size (0x4200) is not
    // aligned to the chosen partition size (0x1000). We compensate for that by writing some
    // padding to the start of the footer file.
    const FOOTER_PADDING: u64 =
        GPT_END_SIZE.next_multiple_of(1 << PARTITION_SIZE_SHIFT) - GPT_END_SIZE;
    let footer_file_offset = next_disk_offset;
    let secondary_table_offset = footer_file_offset + FOOTER_PADDING;
    let disk_size = secondary_table_offset + GPT_END_SIZE;
    // The footer (backup GPT) is the last component.
    composite_proto.component_disks.push(ComponentDisk {
        file_path: footer_path,
        offset: footer_file_offset,
        read_write_capability: ReadWriteCapability::READ_ONLY.into(),
        ..ComponentDisk::new()
    });

    // Calculate CRC32 of partition entries.
    let mut hasher = Hasher::new();
    hasher.update(&partitions_buffer);
    let partition_entries_crc32 = hasher.finalize();

    let disk_guid = Uuid::new_v4();
    write_beginning(
        header_file,
        disk_guid,
        &partitions_buffer,
        partition_entries_crc32,
        secondary_table_offset,
        disk_size,
    )?;

    // Alignment padding first (see FOOTER_PADDING above), then the backup GPT.
    footer_file
        .write_all(&[0; FOOTER_PADDING as usize])
        .map_err(Error::WriteHeader)?;
    write_end(
        footer_file,
        disk_guid,
        &partitions_buffer,
        partition_entries_crc32,
        secondary_table_offset,
    )?;

    composite_proto.length = disk_size;
    output_composite
        .write_all(CDISK_MAGIC.as_bytes())
        .map_err(Error::WriteHeader)?;
    composite_proto
        .write_to_writer(output_composite)
        .map_err(Error::WriteProto)?;

    Ok(())
}
829
830
/// Create a zero filler file which can be used to fill the gaps between partition files.
831
/// The filler is sized to be big enough to fill the gaps. (1 << PARTITION_SIZE_SHIFT)
832
pub fn create_zero_filler<P: AsRef<Path>>(zero_filler_path: P) -> Result<()> {
833
let f = OpenOptions::new()
834
.create(true)
835
.read(true)
836
.write(true)
837
.truncate(true)
838
.open(zero_filler_path.as_ref())
839
.map_err(Error::WriteZeroFiller)?;
840
f.set_len(1 << PARTITION_SIZE_SHIFT)
841
.map_err(Error::WriteZeroFiller)
842
}
843
844
#[cfg(test)]
845
mod tests {
846
use std::fs::OpenOptions;
847
use std::io::Write;
848
use std::matches;
849
850
use base::AsRawDescriptor;
851
use tempfile::tempfile;
852
853
use super::*;
854
855
fn new_from_components(disks: Vec<ComponentDiskPart>) -> Result<CompositeDiskFile> {
856
CompositeDiskFile::new(disks, tempfile().unwrap())
857
}
858
859
#[test]
860
fn block_duplicate_offset_disks() {
861
let file1 = tempfile().unwrap();
862
let file2 = tempfile().unwrap();
863
let disk_part1 = ComponentDiskPart {
864
file: Box::new(file1),
865
offset: 0,
866
length: 100,
867
file_offset: 0,
868
needs_flush: AtomicBool::new(false),
869
};
870
let disk_part2 = ComponentDiskPart {
871
file: Box::new(file2),
872
offset: 0,
873
length: 100,
874
file_offset: 0,
875
needs_flush: AtomicBool::new(false),
876
};
877
assert!(new_from_components(vec![disk_part1, disk_part2]).is_err());
878
}
879
880
#[test]
881
fn get_len() {
882
let file1 = tempfile().unwrap();
883
let file2 = tempfile().unwrap();
884
let disk_part1 = ComponentDiskPart {
885
file: Box::new(file1),
886
offset: 0,
887
length: 100,
888
file_offset: 0,
889
needs_flush: AtomicBool::new(false),
890
};
891
let disk_part2 = ComponentDiskPart {
892
file: Box::new(file2),
893
offset: 100,
894
length: 100,
895
file_offset: 0,
896
needs_flush: AtomicBool::new(false),
897
};
898
let composite = new_from_components(vec![disk_part1, disk_part2]).unwrap();
899
let len = composite.get_len().unwrap();
900
assert_eq!(len, 200);
901
}
902
903
#[test]
904
fn async_get_len() {
905
let file1 = tempfile().unwrap();
906
let file2 = tempfile().unwrap();
907
let disk_part1 = ComponentDiskPart {
908
file: Box::new(file1),
909
offset: 0,
910
length: 100,
911
file_offset: 0,
912
needs_flush: AtomicBool::new(false),
913
};
914
let disk_part2 = ComponentDiskPart {
915
file: Box::new(file2),
916
offset: 100,
917
length: 100,
918
file_offset: 0,
919
needs_flush: AtomicBool::new(false),
920
};
921
let composite = new_from_components(vec![disk_part1, disk_part2]).unwrap();
922
923
let ex = Executor::new().unwrap();
924
let composite = Box::new(composite).to_async_disk(&ex).unwrap();
925
let len = composite.get_len().unwrap();
926
assert_eq!(len, 200);
927
}
928
929
#[test]
930
fn single_file_passthrough() {
931
let file = tempfile().unwrap();
932
let disk_part = ComponentDiskPart {
933
file: Box::new(file),
934
offset: 0,
935
length: 100,
936
file_offset: 0,
937
needs_flush: AtomicBool::new(false),
938
};
939
let composite = new_from_components(vec![disk_part]).unwrap();
940
let mut input_memory = [55u8; 5];
941
let input_volatile_memory = VolatileSlice::new(&mut input_memory[..]);
942
composite
943
.write_all_at_volatile(input_volatile_memory, 0)
944
.unwrap();
945
let mut output_memory = [0u8; 5];
946
let output_volatile_memory = VolatileSlice::new(&mut output_memory[..]);
947
composite
948
.read_exact_at_volatile(output_volatile_memory, 0)
949
.unwrap();
950
assert_eq!(input_memory, output_memory);
951
}
952
953
#[test]
954
fn single_file_passthrough_file_offset() {
955
let file = tempfile().unwrap();
956
let mut input_memory = [55u8, 56u8, 57u8, 58u8, 59u8];
957
let input_volatile_memory = VolatileSlice::new(&mut input_memory[..]);
958
file.write_all_at_volatile(input_volatile_memory, 0)
959
.unwrap();
960
961
let disk_part = ComponentDiskPart {
962
file: Box::new(file),
963
offset: 0,
964
length: 100,
965
file_offset: 2,
966
needs_flush: AtomicBool::new(false),
967
};
968
let composite = new_from_components(vec![disk_part]).unwrap();
969
let mut output_memory = [0u8; 3];
970
let output_volatile_memory = VolatileSlice::new(&mut output_memory[..]);
971
composite
972
.read_exact_at_volatile(output_volatile_memory, 0)
973
.unwrap();
974
assert_eq!(input_memory[2..], output_memory);
975
}
976
977
#[test]
978
fn async_single_file_passthrough() {
979
let file = tempfile().unwrap();
980
let disk_part = ComponentDiskPart {
981
file: Box::new(file),
982
offset: 0,
983
length: 100,
984
file_offset: 0,
985
needs_flush: AtomicBool::new(false),
986
};
987
let composite = new_from_components(vec![disk_part]).unwrap();
988
let ex = Executor::new().unwrap();
989
ex.run_until(async {
990
let composite = Box::new(composite).to_async_disk(&ex).unwrap();
991
let expected = [55u8; 5];
992
assert_eq!(
993
composite.write_double_buffered(0, &expected).await.unwrap(),
994
5
995
);
996
let mut buf = [0u8; 5];
997
assert_eq!(
998
composite
999
.read_double_buffered(0, &mut buf[..])
1000
.await
1001
.unwrap(),
1002
5
1003
);
1004
assert_eq!(buf, expected);
1005
})
1006
.unwrap();
1007
}
1008
1009
#[test]
fn async_single_file_passthrough_offset() {
    // Seed the backing file with five known bytes.
    let backing = tempfile().unwrap();
    let mut seeded = [55u8, 56u8, 57u8, 58u8, 59u8];
    backing
        .write_all_at_volatile(VolatileSlice::new(&mut seeded[..]), 0)
        .unwrap();

    // The component maps composite offset 0 to file offset 2.
    let composite = new_from_components(vec![ComponentDiskPart {
        file: Box::new(backing),
        offset: 0,
        length: 100,
        file_offset: 2,
        needs_flush: AtomicBool::new(false),
    }])
    .unwrap();
    let ex = Executor::new().unwrap();
    ex.run_until(async {
        let disk = Box::new(composite).to_async_disk(&ex).unwrap();
        // An async read at composite offset 0 returns the seeded bytes
        // starting at file offset 2.
        let mut read_back = [0u8; 3];
        assert_eq!(
            disk.read_double_buffered(0, &mut read_back[..])
                .await
                .unwrap(),
            3
        );
        assert_eq!(seeded[2..], read_back);
    })
    .unwrap();
}
1040
1041
#[test]
fn triple_file_descriptors() {
    let file1 = tempfile().unwrap();
    let file2 = tempfile().unwrap();
    let file3 = tempfile().unwrap();
    // Record the raw descriptors before the files are moved into the parts.
    let mut expected = vec![
        file1.as_raw_descriptor(),
        file2.as_raw_descriptor(),
        file3.as_raw_descriptor(),
    ];
    expected.sort_unstable();

    // Three 100-byte components laid out back to back.
    let parts: Vec<ComponentDiskPart> = vec![file1, file2, file3]
        .into_iter()
        .enumerate()
        .map(|(i, file)| ComponentDiskPart {
            file: Box::new(file),
            offset: i as u64 * 100,
            length: 100,
            file_offset: 0,
            needs_flush: AtomicBool::new(false),
        })
        .collect();
    let composite = new_from_components(parts).unwrap();

    // The composite must expose exactly the descriptors of its components.
    let mut actual = composite.as_raw_descriptors();
    actual.sort_unstable();
    assert_eq!(expected, actual);
}
1078
1079
#[test]
fn triple_file_passthrough() {
    // Three 100-byte components laid out back to back.
    let parts: Vec<ComponentDiskPart> = vec![
        tempfile().unwrap(),
        tempfile().unwrap(),
        tempfile().unwrap(),
    ]
    .into_iter()
    .enumerate()
    .map(|(i, file)| ComponentDiskPart {
        file: Box::new(file),
        offset: i as u64 * 100,
        length: 100,
        file_offset: 0,
        needs_flush: AtomicBool::new(false),
    })
    .collect();
    let composite = new_from_components(parts).unwrap();

    // Write 200 bytes at offset 50, which straddles all three components.
    let mut written = [55u8; 200];
    composite
        .write_all_at_volatile(VolatileSlice::new(&mut written[..]), 50)
        .unwrap();

    // Read the same range back and verify byte-for-byte equality.
    let mut read_back = [0u8; 200];
    composite
        .read_exact_at_volatile(VolatileSlice::new(&mut read_back[..]), 50)
        .unwrap();
    assert!(written.iter().eq(read_back.iter()));
}
1118
1119
#[test]
fn async_triple_file_passthrough() {
    // Three 100-byte components laid out back to back.
    let parts: Vec<ComponentDiskPart> = vec![
        tempfile().unwrap(),
        tempfile().unwrap(),
        tempfile().unwrap(),
    ]
    .into_iter()
    .enumerate()
    .map(|(i, file)| ComponentDiskPart {
        file: Box::new(file),
        offset: i as u64 * 100,
        length: 100,
        file_offset: 0,
        needs_flush: AtomicBool::new(false),
    })
    .collect();
    let composite = new_from_components(parts).unwrap();
    let ex = Executor::new().unwrap();
    ex.run_until(async {
        let disk = Box::new(composite).to_async_disk(&ex).unwrap();

        // Each double-buffered write stops at a component boundary, so it
        // reports 100 bytes even when handed a longer slice.
        let expected = [55u8; 200];
        for start in [0u64, 100] {
            assert_eq!(
                disk.write_double_buffered(start, &expected[start as usize..])
                    .await
                    .unwrap(),
                100
            );
        }

        // Reads are likewise clamped to 100 bytes per component.
        let mut buf = [0u8; 200];
        for start in [0u64, 100] {
            assert_eq!(
                disk.read_double_buffered(start, &mut buf[start as usize..])
                    .await
                    .unwrap(),
                100
            );
        }
        assert_eq!(buf, expected);
    })
    .unwrap();
}
1182
1183
#[test]
fn async_triple_file_punch_hole() {
    // Three 100-byte components laid out back to back.
    let parts: Vec<ComponentDiskPart> = vec![
        tempfile().unwrap(),
        tempfile().unwrap(),
        tempfile().unwrap(),
    ]
    .into_iter()
    .enumerate()
    .map(|(i, file)| ComponentDiskPart {
        file: Box::new(file),
        offset: i as u64 * 100,
        length: 100,
        file_offset: 0,
        needs_flush: AtomicBool::new(false),
    })
    .collect();
    let composite = new_from_components(parts).unwrap();
    let ex = Executor::new().unwrap();
    ex.run_until(async {
        let disk = Box::new(composite).to_async_disk(&ex).unwrap();

        // Fill all 300 bytes; each write stops at a component boundary and
        // therefore reports 100 bytes.
        let input = [55u8; 300];
        for start in [0u64, 100, 200] {
            assert_eq!(
                disk.write_double_buffered(start, &input[start as usize..])
                    .await
                    .unwrap(),
                100
            );
        }

        // Punch a hole that straddles all three components.
        disk.punch_hole(50, 200).await.unwrap();

        let mut buf = [0u8; 300];
        for start in [0u64, 100, 200] {
            assert_eq!(
                disk.read_double_buffered(start, &mut buf[start as usize..])
                    .await
                    .unwrap(),
                100
            );
        }

        // The punched range reads back as zeroes; the rest is untouched.
        let mut expected = input;
        expected[50..250].iter_mut().for_each(|x| *x = 0);
        assert_eq!(buf, expected);
    })
    .unwrap();
}
1265
1266
#[test]
fn async_triple_file_write_zeroes() {
    // Three 100-byte components laid out back to back.
    let parts: Vec<ComponentDiskPart> = vec![
        tempfile().unwrap(),
        tempfile().unwrap(),
        tempfile().unwrap(),
    ]
    .into_iter()
    .enumerate()
    .map(|(i, file)| ComponentDiskPart {
        file: Box::new(file),
        offset: i as u64 * 100,
        length: 100,
        file_offset: 0,
        needs_flush: AtomicBool::new(false),
    })
    .collect();
    let composite = new_from_components(parts).unwrap();
    let ex = Executor::new().unwrap();
    ex.run_until(async {
        let disk = Box::new(composite).to_async_disk(&ex).unwrap();

        // Fill all 300 bytes; each write stops at a component boundary and
        // therefore reports 100 bytes.
        let input = [55u8; 300];
        for start in [0u64, 100, 200] {
            assert_eq!(
                disk.write_double_buffered(start, &input[start as usize..])
                    .await
                    .unwrap(),
                100
            );
        }

        // Zero a range that straddles all three components.
        disk.write_zeroes_at(50, 200).await.unwrap();

        let mut buf = [0u8; 300];
        for start in [0u64, 100, 200] {
            assert_eq!(
                disk.read_double_buffered(start, &mut buf[start as usize..])
                    .await
                    .unwrap(),
                100
            );
        }

        // The zeroed range reads back as zeroes; the rest is untouched.
        let mut expected = input;
        expected[50..250].iter_mut().for_each(|x| *x = 0);
        assert_eq!(buf, expected);
    })
    .unwrap();
}
1348
1349
// TODO: fsync on a RO file is legal, this test doesn't work as expected. Consider using a mock
// DiskFile to detect the fsync calls.
#[test]
fn async_fsync_skips_unchanged_parts() {
    // A writable component backed by an anonymous temp file.
    let mut writable_file = tempfile().unwrap();
    writable_file.write_all(&[0u8; 100]).unwrap();
    writable_file.seek(SeekFrom::Start(0)).unwrap();
    // A read-only component: a named temp image reopened with read access only.
    let mut ro_image = tempfile::NamedTempFile::new().unwrap();
    ro_image.write_all(&[0u8; 100]).unwrap();
    let readonly_file = OpenOptions::new()
        .read(true)
        .open(ro_image.path())
        .unwrap();

    let writable_part = ComponentDiskPart {
        file: Box::new(writable_file),
        offset: 0,
        length: 100,
        file_offset: 0,
        needs_flush: AtomicBool::new(false),
    };
    let readonly_part = ComponentDiskPart {
        file: Box::new(readonly_file),
        offset: 100,
        length: 100,
        file_offset: 0,
        needs_flush: AtomicBool::new(false),
    };
    let composite = new_from_components(vec![writable_part, readonly_part]).unwrap();
    let ex = Executor::new().unwrap();
    ex.run_until(async {
        let disk = Box::new(composite).to_async_disk(&ex).unwrap();

        // Write to the RW part so that some fsync operation will occur.
        disk.write_zeroes_at(0, 20).await.unwrap();

        // This is the test's assert. fsyncing should NOT touch a read-only disk part. On
        // Windows, this would be an error.
        disk.fsync().await.expect(
            "Failed to fsync composite disk. \
                This can happen if the disk writable state is wrong.",
        );
    })
    .unwrap();
}
1394
1395
#[test]
fn beginning_size() {
    // write_beginning must emit exactly GPT_BEGINNING_SIZE bytes regardless
    // of the partition contents.
    let mut out = vec![];
    let entries = [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
    let disk_size = 1000 * SECTOR_SIZE;
    write_beginning(
        &mut out,
        Uuid::from_u128(0x12345678_1234_5678_abcd_12345678abcd),
        &entries,
        42,
        disk_size - GPT_END_SIZE,
        disk_size,
    )
    .unwrap();

    assert_eq!(out.len(), GPT_BEGINNING_SIZE as usize);
}
1412
1413
#[test]
fn end_size() {
    // write_end must emit exactly GPT_END_SIZE bytes regardless of the
    // partition contents.
    let mut out = vec![];
    let entries = [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
    let disk_size = 1000 * SECTOR_SIZE;
    write_end(
        &mut out,
        Uuid::from_u128(0x12345678_1234_5678_abcd_12345678abcd),
        &entries,
        42,
        disk_size - GPT_END_SIZE,
    )
    .unwrap();

    assert_eq!(out.len(), GPT_END_SIZE as usize);
}
1429
1430
/// Creates a composite disk image with no partitions.
#[test]
fn create_composite_disk_empty() {
    let mut gpt_header = tempfile().unwrap();
    let mut gpt_footer = tempfile().unwrap();
    let mut composite = tempfile().unwrap();

    // With no partitions, only the GPT header and footer components are
    // generated; this must still succeed.
    create_composite_disk(
        &[],
        Path::new("/zero_filler.img"),
        Path::new("/header_path.img"),
        &mut gpt_header,
        Path::new("/footer_path.img"),
        &mut gpt_footer,
        &mut composite,
    )
    .unwrap();
}
1448
1449
/// Creates a composite disk image with two partitions.
#[test]
#[allow(clippy::unnecessary_to_owned)] // false positives
fn create_composite_disk_success() {
    fn tmpfile(prefix: &str) -> tempfile::NamedTempFile {
        tempfile::Builder::new().prefix(prefix).tempfile().unwrap()
    }

    let mut header_image = tmpfile("header");
    let mut footer_image = tmpfile("footer");
    let mut composite_image = tmpfile("composite");

    // The test doesn't read these, just needs to be able to open them.
    let partition1 = tmpfile("partition1");
    // Fix: this previously reused the "partition1" prefix (copy-paste typo).
    let partition2 = tmpfile("partition2");
    let zero_filler = tmpfile("zero");

    create_composite_disk(
        &[
            PartitionInfo {
                label: "partition1".to_string(),
                path: partition1.path().to_path_buf(),
                partition_type: ImagePartitionType::LinuxFilesystem,
                writable: false,
                // Needs small amount of padding.
                size: 4000,
                part_guid: None,
            },
            PartitionInfo {
                label: "partition2".to_string(),
                path: partition2.path().to_path_buf(),
                partition_type: ImagePartitionType::LinuxFilesystem,
                writable: true,
                // Needs no padding.
                size: 4096,
                part_guid: Some(Uuid::from_u128(0x4049C8DC_6C2B_C740_A95A_BDAA629D4378)),
            },
        ],
        zero_filler.path(),
        &header_image.path().to_path_buf(),
        header_image.as_file_mut(),
        &footer_image.path().to_path_buf(),
        footer_image.as_file_mut(),
        composite_image.as_file_mut(),
    )
    .unwrap();

    // Check magic.
    composite_image.rewind().unwrap();
    let mut magic_space = [0u8; CDISK_MAGIC.len()];
    composite_image.read_exact(&mut magic_space[..]).unwrap();
    assert_eq!(magic_space, CDISK_MAGIC.as_bytes());
    // Check proto: the component list must interleave the partitions with the
    // GPT header/footer and the zero filler used for padding.
    let proto = CompositeDisk::parse_from_reader(&mut composite_image).unwrap();
    assert_eq!(
        proto,
        CompositeDisk {
            version: 2,
            component_disks: vec![
                ComponentDisk {
                    file_path: header_image.path().to_str().unwrap().to_string(),
                    offset: 0,
                    read_write_capability: ReadWriteCapability::READ_ONLY.into(),
                    ..ComponentDisk::new()
                },
                ComponentDisk {
                    file_path: partition1.path().to_str().unwrap().to_string(),
                    offset: 0x5000, // GPT_BEGINNING_SIZE,
                    read_write_capability: ReadWriteCapability::READ_ONLY.into(),
                    ..ComponentDisk::new()
                },
                ComponentDisk {
                    file_path: zero_filler.path().to_str().unwrap().to_string(),
                    offset: 0x5fa0, // GPT_BEGINNING_SIZE + 4000,
                    read_write_capability: ReadWriteCapability::READ_ONLY.into(),
                    ..ComponentDisk::new()
                },
                ComponentDisk {
                    file_path: partition2.path().to_str().unwrap().to_string(),
                    offset: 0x6000, // GPT_BEGINNING_SIZE + 4096,
                    read_write_capability: ReadWriteCapability::READ_WRITE.into(),
                    ..ComponentDisk::new()
                },
                ComponentDisk {
                    file_path: footer_image.path().to_str().unwrap().to_string(),
                    offset: 0x7000, // GPT_BEGINNING_SIZE + 4096 + 4096,
                    read_write_capability: ReadWriteCapability::READ_ONLY.into(),
                    ..ComponentDisk::new()
                },
            ],
            length: 0xc000,
            ..CompositeDisk::new()
        }
    );

    // Open the file as a composite disk and do some basic GPT header/footer validation.
    let ex = Executor::new().unwrap();
    ex.run_until(async {
        let disk = Box::new(
            CompositeDiskFile::from_file(
                composite_image.into_file(),
                DiskFileParams {
                    path: "/foo".into(),
                    is_read_only: true,
                    is_sparse_file: false,
                    is_overlapped: false,
                    is_direct: false,
                    lock: false,
                    depth: 0,
                },
            )
            .unwrap(),
        )
        .to_async_disk(&ex)
        .unwrap();

        // Primary GPT header lives in the second sector; backup header is the
        // last sector of the disk.
        let header_offset = SECTOR_SIZE;
        let footer_offset = disk.get_len().unwrap() - SECTOR_SIZE;

        let mut header_bytes = [0u8; SECTOR_SIZE as usize];
        assert_eq!(
            disk.read_double_buffered(header_offset, &mut header_bytes[..])
                .await
                .unwrap(),
            SECTOR_SIZE as usize
        );

        let mut footer_bytes = [0u8; SECTOR_SIZE as usize];
        assert_eq!(
            disk.read_double_buffered(footer_offset, &mut footer_bytes[..])
                .await
                .unwrap(),
            SECTOR_SIZE as usize
        );

        // Check the header and footer fields point to each other correctly.
        // Bytes 24..32 are MyLBA and 32..40 are AlternateLBA in a GPT header.
        let header_current_lba = u64::from_le_bytes(header_bytes[24..32].try_into().unwrap());
        assert_eq!(header_current_lba * SECTOR_SIZE, header_offset);
        let header_backup_lba = u64::from_le_bytes(header_bytes[32..40].try_into().unwrap());
        assert_eq!(header_backup_lba * SECTOR_SIZE, footer_offset);

        let footer_current_lba = u64::from_le_bytes(footer_bytes[24..32].try_into().unwrap());
        assert_eq!(footer_current_lba * SECTOR_SIZE, footer_offset);
        let footer_backup_lba = u64::from_le_bytes(footer_bytes[32..40].try_into().unwrap());
        assert_eq!(footer_backup_lba * SECTOR_SIZE, header_offset);

        // Header and footer should be equal if we zero the pointers and CRCs.
        header_bytes[16..20].fill(0);
        header_bytes[24..40].fill(0);
        footer_bytes[16..20].fill(0);
        footer_bytes[24..40].fill(0);
        assert_eq!(header_bytes, footer_bytes);
    })
    .unwrap();
}
1604
1605
/// Attempts to create a composite disk image with two partitions with the same label.
#[test]
fn create_composite_disk_duplicate_label() {
    let mut gpt_header = tempfile().unwrap();
    let mut gpt_footer = tempfile().unwrap();
    let mut composite = tempfile().unwrap();

    // Both partitions deliberately share the label "label".
    let labeled = |path: &str, writable| PartitionInfo {
        label: "label".to_string(),
        path: path.to_string().into(),
        partition_type: ImagePartitionType::LinuxFilesystem,
        writable,
        size: 0,
        part_guid: None,
    };
    let result = create_composite_disk(
        &[
            labeled("/partition1.img", false),
            labeled("/partition2.img", true),
        ],
        Path::new("/zero_filler.img"),
        Path::new("/header_path.img"),
        &mut gpt_header,
        Path::new("/footer_path.img"),
        &mut gpt_footer,
        &mut composite,
    );
    // The duplicate label must be rejected with the offending label attached.
    assert!(matches!(result, Err(Error::DuplicatePartitionLabel(label)) if label == "label"));
}
1640
}
1641
1642