Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
google
GitHub Repository: google/crosvm
Path: blob/main/devices/src/virtio/scsi/commands.rs
5394 views
1
// Copyright 2023 The ChromiumOS Authors
2
// Use of this source code is governed by a BSD-style license that can be
3
// found in the LICENSE file.
4
5
use std::cmp;
6
use std::io::Read;
7
use std::io::Write;
8
9
use base::warn;
10
use data_model::Be16;
11
use data_model::Be32;
12
use data_model::Be64;
13
use zerocopy::FromBytes;
14
use zerocopy::Immutable;
15
use zerocopy::IntoBytes;
16
use zerocopy::KnownLayout;
17
use zerocopy::Unaligned;
18
19
use crate::virtio::scsi::constants::INQUIRY;
20
use crate::virtio::scsi::constants::MAINTENANCE_IN;
21
use crate::virtio::scsi::constants::MODE_SELECT_6;
22
use crate::virtio::scsi::constants::MODE_SENSE_6;
23
use crate::virtio::scsi::constants::READ_10;
24
use crate::virtio::scsi::constants::READ_6;
25
use crate::virtio::scsi::constants::READ_CAPACITY_10;
26
use crate::virtio::scsi::constants::READ_CAPACITY_16;
27
use crate::virtio::scsi::constants::REPORT_LUNS;
28
use crate::virtio::scsi::constants::REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS;
29
use crate::virtio::scsi::constants::SERVICE_ACTION_IN_16;
30
use crate::virtio::scsi::constants::SYNCHRONIZE_CACHE_10;
31
use crate::virtio::scsi::constants::TEST_UNIT_READY;
32
use crate::virtio::scsi::constants::TYPE_DISK;
33
use crate::virtio::scsi::constants::UNMAP;
34
use crate::virtio::scsi::constants::WRITE_10;
35
use crate::virtio::scsi::constants::WRITE_SAME_10;
36
use crate::virtio::scsi::constants::WRITE_SAME_16;
37
use crate::virtio::scsi::device::AsyncLogicalUnit;
38
use crate::virtio::scsi::device::ExecuteError;
39
use crate::virtio::Reader;
40
use crate::virtio::Writer;
41
42
/// Parse and execute a SCSI command.
///
/// Dispatches on the operation code in `cdb[0]` to the corresponding command
/// emulator. Data-in commands write their response through `writer`; data-out
/// commands consume their payload from `reader`. Opcodes crosvm does not
/// implement are logged and rejected with `ExecuteError::Unsupported`.
pub async fn execute_cdb(
    cdb: &[u8],
    reader: &mut Reader,
    writer: &mut Writer,
    dev: &AsyncLogicalUnit,
) -> Result<(), ExecuteError> {
    // The first byte of every CDB is the operation code.
    let op = cdb[0];
    match op {
        INQUIRY => parse_cdb::<Inquiry>(cdb)?.emulate(writer, dev),
        // MAINTENANCE IN and SERVICE ACTION IN(16) carry a sub-opcode (service
        // action) and are dispatched by their own helpers below.
        MAINTENANCE_IN => execute_maintenance_in(cdb, writer),
        MODE_SELECT_6 => parse_cdb::<ModeSelect6>(cdb)?.emulate(reader, dev),
        MODE_SENSE_6 => parse_cdb::<ModeSense6>(cdb)?.emulate(writer, dev),
        READ_6 => parse_cdb::<Read6>(cdb)?.emulate(writer, dev).await,
        READ_10 => parse_cdb::<Read10>(cdb)?.emulate(writer, dev).await,
        READ_CAPACITY_10 => parse_cdb::<ReadCapacity10>(cdb)?.emulate(writer, dev),
        REPORT_LUNS => parse_cdb::<ReportLuns>(cdb)?.emulate(writer),
        SERVICE_ACTION_IN_16 => execute_service_action_in_16(cdb, writer, dev),
        SYNCHRONIZE_CACHE_10 => parse_cdb::<SynchronizeCache10>(cdb)?.emulate(dev).await,
        TEST_UNIT_READY => parse_cdb::<TestUnitReady>(cdb)?.emulate(),
        UNMAP => parse_cdb::<Unmap>(cdb)?.emulate(reader, dev).await,
        WRITE_10 => parse_cdb::<Write10>(cdb)?.emulate(reader, dev).await,
        WRITE_SAME_10 => parse_cdb::<WriteSame10>(cdb)?.emulate(reader, dev).await,
        WRITE_SAME_16 => parse_cdb::<WriteSame16>(cdb)?.emulate(reader, dev).await,
        _ => {
            warn!("SCSI command {:#x?} is not implemented", op);
            Err(ExecuteError::Unsupported(op))
        }
    }
}
72
73
fn execute_maintenance_in(cdb: &[u8], writer: &mut Writer) -> Result<(), ExecuteError> {
74
// Top three bits are reserved.
75
let service_action = cdb[1] & 0x1f;
76
match service_action {
77
REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS => {
78
parse_cdb::<ReportSupportedTMFs>(cdb)?.emulate(writer)
79
}
80
_ => {
81
warn!(
82
"service action {:#x?} for MAINTENANCE_IN is not implemented",
83
service_action
84
);
85
Err(ExecuteError::Unsupported(cdb[0]))
86
}
87
}
88
}
89
90
fn execute_service_action_in_16(
91
cdb: &[u8],
92
writer: &mut Writer,
93
dev: &AsyncLogicalUnit,
94
) -> Result<(), ExecuteError> {
95
// Top three bits are reserved.
96
let service_action = cdb[1] & 0x1f;
97
match service_action {
98
READ_CAPACITY_16 => parse_cdb::<ReadCapacity16>(cdb)?.emulate(writer, dev),
99
_ => {
100
warn!(
101
"service action {:#x?} for SERVICE_ACTION_IN_16 is not implemented",
102
service_action
103
);
104
Err(ExecuteError::Unsupported(cdb[0]))
105
}
106
}
107
}
108
109
fn parse_cdb<T: FromBytes + Unaligned + Immutable + KnownLayout>(
110
cdb: &[u8],
111
) -> Result<&T, ExecuteError> {
112
let (command, _) = T::ref_from_prefix(cdb).map_err(|_| ExecuteError::ReadCommand)?;
113
Ok(command)
114
}
115
116
/// TEST UNIT READY command: 6-byte CDB layout.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct TestUnitReady {
    // Operation code (TEST_UNIT_READY).
    opcode: u8,
    reserved: [u8; 4],
    control: u8,
}

impl TestUnitReady {
    /// Report whether the logical unit is ready; crosvm's disk is always ready.
    fn emulate(&self) -> Result<(), ExecuteError> {
        // noop as the device is ready.
        Ok(())
    }
}
142
143
fn check_lba_range(last_lba: u64, lba: u64, xfer_blocks: usize) -> Result<(), ExecuteError> {
144
// Checking `lba + xfer_blocks - 1 <= last_lba`, but we are being careful about overflows.
145
match lba.checked_add(xfer_blocks as u64) {
146
Some(v) if v <= last_lba + 1 => Ok(()),
147
_ => Err(ExecuteError::LbaOutOfRange {
148
lba,
149
xfer_blocks,
150
last_lba,
151
}),
152
}
153
}
154
155
/// Reads `xfer_blocks` blocks starting at block `lba` from the disk image and
/// streams them into `writer` (the guest's data-in buffer).
///
/// Validates the LBA range first, then issues a single positioned read.
/// On I/O failure, reports `ExecuteError::ReadIo` carrying the residual byte
/// count (bytes requested but not transferred) for sense reporting.
async fn read_from_disk(
    writer: &mut Writer,
    dev: &AsyncLogicalUnit,
    xfer_blocks: usize,
    lba: u64,
) -> Result<(), ExecuteError> {
    check_lba_range(dev.last_lba, lba, xfer_blocks)?;
    let block_size = dev.block_size;
    // Total byte count and starting byte offset of the transfer.
    let count = xfer_blocks * block_size as usize;
    let offset = lba * block_size as u64;
    // Snapshot progress so the residual can be computed if the transfer fails partway.
    let before = writer.bytes_written();
    writer
        .write_all_from_at_fut(&*dev.disk_image, count, offset)
        .await
        .map_err(|desc_error| {
            let resid = count - (writer.bytes_written() - before);
            ExecuteError::ReadIo { resid, desc_error }
        })
}
174
175
/// READ(6) command: 6-byte CDB with a 21-bit LBA and an 8-bit transfer length.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct Read6 {
    // Operation code (READ_6).
    opcode: u8,
    // 21-bit big-endian LBA; the top 3 bits of the first byte are reserved.
    lba_bytes: [u8; 3],
    // Transfer length in blocks; 0 means 256 blocks.
    xfer_len_byte: u8,
    control: u8,
}

impl Read6 {
    /// Decodes the 21-bit big-endian logical block address.
    fn lba(&self) -> u32 {
        u32::from_be_bytes([
            0,
            // The top three bits are reserved.
            self.lba_bytes[0] & 0x1f,
            self.lba_bytes[1],
            self.lba_bytes[2],
        ])
    }

    /// Transfer length in blocks.
    fn xfer_len(&self) -> usize {
        // The transfer length set to 0 means 256 blocks should be read.
        if self.xfer_len_byte == 0 {
            256
        } else {
            self.xfer_len_byte as usize
        }
    }

    /// Executes READ(6): reads the requested blocks into the guest buffer.
    async fn emulate(
        &self,
        writer: &mut Writer,
        dev: &AsyncLogicalUnit,
    ) -> Result<(), ExecuteError> {
        let xfer_len = self.xfer_len();
        let lba = self.lba() as u64;
        let _trace = cros_tracing::trace_event!(VirtioScsi, "READ(6)", xfer_len, lba);
        read_from_disk(writer, dev, xfer_len, lba).await
    }
}
227
228
/// INQUIRY command: 6-byte CDB requesting standard inquiry data or a vital
/// product data (VPD) page.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct Inquiry {
    // Operation code (INQUIRY).
    opcode: u8,
    // Bit 0 is the EVPD flag: when set, a VPD page is requested.
    vpd_field: u8,
    // Which VPD page to return (must be 0 when EVPD is clear).
    page_code: u8,
    // Big-endian allocation length of the guest's response buffer.
    alloc_len_bytes: [u8; 2],
    control: u8,
}

impl Inquiry {
    /// Whether the EVPD bit is set (guest wants a VPD page, not standard data).
    fn vital_product_data_enabled(&self) -> bool {
        self.vpd_field & 0x1 != 0
    }

    /// Allocation length: maximum number of response bytes the guest accepts.
    fn alloc_len(&self) -> usize {
        u16::from_be_bytes(self.alloc_len_bytes) as usize
    }

    /// Requested VPD page code.
    fn page_code(&self) -> u8 {
        self.page_code
    }

    /// Executes INQUIRY, writing either standard inquiry data or, when EVPD is
    /// set, the requested VPD page.
    fn emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
        let _trace = cros_tracing::trace_event!(VirtioScsi, "INQUIRY");
        if self.vital_product_data_enabled() {
            return self.emulate_vital_product_data_page(writer, dev);
        }
        // PAGE CODE should be 0 when vpd bit is 0.
        if self.page_code() != 0 {
            return Err(ExecuteError::InvalidField);
        }
        let alloc_len = self.alloc_len();
        let mut outbuf = vec![0u8; cmp::max(writer.available_bytes(), alloc_len)];
        // Peripheral
        outbuf[0] = TYPE_DISK;
        // Removable bit. We currently do not support removable SCSI devices.
        outbuf[1] = 0x0;
        // Version 0x5 indicates that the device complies to SPC-3.
        outbuf[2] = 0x5;
        // Hierarchical Support | Response Data Format
        // Support hierarchical addressing mode to assign LUNs to logical units.
        // Response Data Format should be 2.
        outbuf[3] = 0x10 | 0x2;
        // Additional Length
        outbuf[4] = {
            let buflen = outbuf.len().try_into().unwrap_or(u8::MAX);
            // We will write at least 36 bytes and this is the 5th byte.
            cmp::max(buflen, 36) - 5
        };
        // Cmdque: support full task management mode
        outbuf[7] = 0x2;
        // Vendor
        Self::fill_left_aligned_ascii(&mut outbuf[8..16], "CROSVM");
        // Product ID
        Self::fill_left_aligned_ascii(&mut outbuf[16..32], "CROSVM HARDDISK");
        // Product revision level
        Self::fill_left_aligned_ascii(&mut outbuf[32..36], "0.1");

        // Only alloc_len bytes are returned; the rest of the buffer is dropped.
        writer
            .write_all(&outbuf[..alloc_len])
            .map_err(ExecuteError::Write)
    }

    /// Builds and writes the requested VPD page. Supported pages: 0x00
    /// (supported pages), 0x83 (device identification), 0xb0 (block limits),
    /// 0xb2 (logical block provisioning).
    fn emulate_vital_product_data_page(
        &self,
        writer: &mut Writer,
        dev: &AsyncLogicalUnit,
    ) -> Result<(), ExecuteError> {
        let alloc_len = self.alloc_len();
        // 4096 is comfortably larger than any page built below, so indexing is safe.
        let mut outbuf = vec![0u8; cmp::max(4096, alloc_len)];
        // Peripheral
        outbuf[0] = TYPE_DISK;
        let page_code = self.page_code();
        outbuf[1] = page_code;
        match page_code {
            // Supported VPD Pages
            0x00 => {
                // outbuf[2] byte is reserved.
                // 0x00: Supported VPD Pages (this command)
                // 0x83: Device Identification
                // 0xb0: Block Limits
                // 0xb2: Logical Block Provisioning
                const SUPPORTED_VPD_PAGE_CODES: [u8; 4] = [0x00, 0x83, 0xb0, 0xb2];
                let page_code_len: u8 = SUPPORTED_VPD_PAGE_CODES
                    .len()
                    .try_into()
                    .expect("The number of vpd page codes cannot exceed u8::MAX");
                // Page length
                outbuf[3] = page_code_len;
                outbuf[4..4 + page_code_len as usize].copy_from_slice(&SUPPORTED_VPD_PAGE_CODES);
            }
            // Device Identification
            0x83 => {
                const DEVICE_ID: &[u8] = b"CROSVM SCSI DEVICE";
                let device_id_len: u8 = DEVICE_ID
                    .len()
                    .try_into()
                    .expect("device id should be shorter");
                // Page length: An identification descriptor will be 4 bytes followed by an id.
                outbuf[2..4].copy_from_slice(&(4 + device_id_len as u16).to_be_bytes());
                // ASCII
                outbuf[4] = 0x2;
                // ASSOCIATION | IDENTIFICATION_TYPE_FIELD
                // ASSOCIATION: device_id is associated with the addressed logical unit.
                // IDENTIFICATION_TYPE_FIELD: vendor specific
                // outbuf[5] = 0x0 | 0x0;
                // outbuf[6] byte is reserved.
                outbuf[7] = device_id_len;
                outbuf[8..8 + device_id_len as usize].copy_from_slice(DEVICE_ID);
            }
            // Block Limits
            0xb0 => {
                // Page length
                outbuf[3] = 0x3c;
                // We do not support a value of zero in the NUMBER OF LOGICAL BLOCKS field in the
                // WRITE SAME command CDBs.
                outbuf[4] = 1;
                // skip outbuf[5]: crosvm does not support the COMPARE AND WRITE command.
                // Maximum transfer length
                outbuf[8..12].copy_from_slice(
                    &(dev.last_lba + 1)
                        .try_into()
                        .unwrap_or(u32::MAX)
                        .to_be_bytes(),
                );
                // Maximum unmap LBA count
                outbuf[20..24].fill(0xff);
                // Maximum unmap block descriptor count
                outbuf[24..28].fill(0xff);
                // Optimal unmap granularity
                outbuf[28..32].copy_from_slice(&128u32.to_be_bytes());
                // Maximum WRITE SAME length
                outbuf[36..44].copy_from_slice(&(dev.last_lba + 1).to_be_bytes());
            }
            // Logical Block Provisioning
            0xb2 => {
                // Page length
                outbuf[3] = 4;
                // skip outbuf[4]: crosvm does not support logical block provisioning threshold
                // sets.
                // NOTE: these locals intentionally shadow the opcode constants imported at the
                // top of the file; here they are LBPU/LBPWS/LBPWS10 feature bits.
                const UNMAP: u8 = 1 << 7;
                const WRITE_SAME_16: u8 = 1 << 6;
                const WRITE_SAME_10: u8 = 1 << 5;
                outbuf[5] = UNMAP | WRITE_SAME_10 | WRITE_SAME_16;
                // The logical unit is thin-provisioned.
                outbuf[6] = 0x02;
                // skip outbuf[7]: The logical block data represented by unmapped LBAs is vendor
                // specific
            }
            _ => {
                warn!("unsupported vpd page code: {:#x?}", page_code);
                return Err(ExecuteError::InvalidField);
            }
        };
        writer
            .write_all(&outbuf[..alloc_len])
            .map_err(ExecuteError::Write)
    }

    /// Copies `s` into `buf` left-aligned and pads the remainder with ASCII
    /// spaces, as SCSI identification strings require.
    fn fill_left_aligned_ascii(buf: &mut [u8], s: &str) {
        debug_assert!(s.len() < buf.len());
        buf[..s.len()].copy_from_slice(s.as_bytes());
        buf[s.len()..].fill(b' ');
    }
}
406
407
// Fill in the information of the page code and return the number of bytes written to the buffer.
408
fn fill_mode_page(
409
page_code: u8,
410
subpage_code: u8,
411
page_control: PageControl,
412
outbuf: &mut [u8],
413
) -> Option<u8> {
414
// outbuf[0]: page code
415
// outbuf[1]: page length
416
match (page_code, subpage_code) {
417
// Vendor specific.
418
(0x00, 0x00) => None,
419
// Read-Write error recovery mode page
420
(0x01, 0x00) => {
421
const LEN: u8 = 10;
422
outbuf[0] = page_code;
423
outbuf[1] = LEN;
424
if page_control != PageControl::Changable {
425
// Automatic write reallocation enabled.
426
outbuf[3] = 0x80;
427
}
428
Some(LEN + 2)
429
}
430
// Caching.
431
(0x08, 0x00) => {
432
const LEN: u8 = 0x12;
433
outbuf[0] = page_code;
434
outbuf[1] = LEN;
435
if page_control != PageControl::Changable {
436
// Writeback cache enabled.
437
outbuf[2] = 0x04;
438
}
439
Some(LEN + 2)
440
}
441
_ => None,
442
}
443
}
444
445
// According to the spec, devices that implement MODE SENSE(6) shall also implement MODE SELECT(6)
// as well.
/// MODE SELECT(6) command: 6-byte CDB followed by a data-out parameter list.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct ModeSelect6 {
    // Operation code (MODE_SELECT_6).
    opcode: u8,
    // Bit 4 is the page format (PF) bit; bit 0 is the saved pages (SP) bit.
    pf_sp_field: u8,
    _reserved: [u8; 2],
    // Length of the parameter list in the data-out buffer.
    param_list_len: u8,
    control: u8,
}

impl ModeSelect6 {
    /// Accept only PF=1 (page-format parameter data) and SP=0 (no saving).
    fn is_valid_pf_and_sp(&self) -> bool {
        // crosvm only support page format bit = 1 and saved pages bit = 0
        self.pf_sp_field & 0x11 == 0x10
    }

    /// Parses the MODE SELECT(6) parameter list from `reader` and validates it.
    ///
    /// crosvm does not allow any mode parameter to change: the header and the
    /// optional block descriptor must match the device's current values, and
    /// every mode page sent by the guest must equal what MODE SENSE reports.
    fn emulate(&self, reader: &mut Reader, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
        // Short (8-byte) block descriptor in the parameter list.
        #[derive(
            Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
        )]
        #[repr(C, packed)]
        struct BlockDescriptor {
            _density: u8,
            _number_of_blocks_field: [u8; 3],
            _reserved: u8,
            // 24-bit big-endian block length.
            block_len_field: [u8; 3],
        }

        impl BlockDescriptor {
            // Decodes the 24-bit big-endian block length.
            fn block_len(&self) -> u32 {
                u32::from_be_bytes([
                    0,
                    self.block_len_field[0],
                    self.block_len_field[1],
                    self.block_len_field[2],
                ])
            }
        }

        let _trace = cros_tracing::trace_event!(VirtioScsi, "MODE_SELECT(6)");
        if !self.is_valid_pf_and_sp() {
            return Err(ExecuteError::InvalidField);
        }
        // Values for the mode parameter header.
        let [_mode_data_len, medium_type, _dev_param, block_desc_len] =
            reader.read_obj::<[u8; 4]>().map_err(ExecuteError::Read)?;
        if medium_type != TYPE_DISK {
            return Err(ExecuteError::InvalidField);
        }
        match block_desc_len {
            0 => (),
            8 => {
                let block_desc = reader
                    .read_obj::<BlockDescriptor>()
                    .map_err(ExecuteError::Read)?;
                // crosvm currently does not support modifying the block size.
                if block_desc.block_len() != dev.block_size {
                    return Err(ExecuteError::InvalidField);
                }
            }
            // crosvm does not support 2 or more block descriptors, hence block_desc_len other than
            // 0 and 8 is considered invalid.
            _ => return Err(ExecuteError::InvalidField),
        };
        // Validate each mode page in the remainder of the parameter list.
        while reader.available_bytes() > 0 {
            Self::handle_mode_page(reader)?;
        }
        Ok(())
    }

    /// Reads one mode page from `reader` and rejects it unless it matches the
    /// device's current (unchangeable) values for that page.
    fn handle_mode_page(reader: &mut Reader) -> Result<(), ExecuteError> {
        // Header of a page_0-format mode page (no subpage).
        #[derive(
            Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
        )]
        #[repr(C, packed)]
        struct Page0Header {
            page_code: u8,
            page_len: u8,
        }

        // Header of a subpage-format mode page.
        #[derive(
            Copy, Clone, Debug, Default, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq, Eq,
        )]
        #[repr(C, packed)]
        struct SubpageHeader {
            page_code: u8,
            subpage_code: u8,
            page_len_field: [u8; 2],
        }

        // Bit 6 of the first byte (SPF) selects the subpage format.
        let is_page0 = reader.peek_obj::<u8>().map_err(ExecuteError::Read)? & 0x40 == 0;
        let (page_code, subpage_code, page_len) = if is_page0 {
            let header = reader
                .read_obj::<Page0Header>()
                .map_err(ExecuteError::Read)?;
            (header.page_code, 0, header.page_len as u16)
        } else {
            let header = reader
                .read_obj::<SubpageHeader>()
                .map_err(ExecuteError::Read)?;
            (
                header.page_code,
                header.subpage_code,
                u16::from_be_bytes(header.page_len_field),
            )
        };
        // Render the device's current page contents and compare with the guest's bytes.
        let mut outbuf = vec![0; page_len as usize];
        fill_mode_page(page_code, subpage_code, PageControl::Current, &mut outbuf);
        let mut input = vec![0; page_len as usize];
        reader.read_exact(&mut input).map_err(ExecuteError::Read)?;
        // crosvm does not allow any values to be changed.
        if input == outbuf {
            Ok(())
        } else {
            Err(ExecuteError::InvalidField)
        }
    }
}
577
578
/// MODE SENSE(6) command: 6-byte CDB requesting mode parameter data.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct ModeSense6 {
    // Operation code (MODE_SENSE_6).
    opcode: u8,
    // Bit 3 is the DBD (disable block descriptors) bit.
    dbd_field: u8,
    // Top two bits: page control; low six bits: page code.
    page_control_and_page_code: u8,
    subpage_code: u8,
    // Allocation length of the guest's response buffer.
    alloc_len: u8,
    control: u8,
}

/// Which flavor of mode-page values the guest asked for.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum PageControl {
    // Current values.
    Current,
    // Default values.
    Default,
    // Mask of which bits are changeable.
    Changable,
}

impl ModeSense6 {
    /// Allocation length: maximum number of response bytes the guest accepts.
    fn alloc_len(&self) -> usize {
        self.alloc_len as usize
    }

    /// Whether the guest asked to omit block descriptors (DBD bit).
    fn disable_block_desc(&self) -> bool {
        self.dbd_field & 0x8 != 0
    }

    /// Requested page code (low six bits of byte 2).
    fn page_code(&self) -> u8 {
        // The top two bits represents page control field, and the rest is page code.
        self.page_control_and_page_code & 0x3f
    }

    /// Decodes the 2-bit page control field; saved values (0b11) are not supported.
    fn page_control(&self) -> Result<PageControl, ExecuteError> {
        match self.page_control_and_page_code >> 6 {
            0 => Ok(PageControl::Current),
            1 => Ok(PageControl::Changable),
            2 => Ok(PageControl::Default),
            3 => Err(ExecuteError::SavingParamNotSupported),
            // Unreachable: a u8 shifted right by 6 is always 0..=3.
            _ => Err(ExecuteError::InvalidField),
        }
    }

    /// Requested subpage code.
    fn subpage_code(&self) -> u8 {
        self.subpage_code
    }

    /// Executes MODE SENSE(6): builds the mode parameter header, the optional
    /// block descriptor, and the requested mode page(s), then writes
    /// `alloc_len` bytes to the guest.
    fn emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
        let _trace = cros_tracing::trace_event!(VirtioScsi, "MODE_SENSE(6)");
        let alloc_len = self.alloc_len();
        // alloc_len is at most 255; 4096 leaves ample room for all pages below.
        let mut outbuf = vec![0u8; cmp::max(4096, alloc_len)];
        // outbuf[0]: Represents data length. Will be filled later.
        // outbuf[1]: Medium type should be 0.

        // Device specific parameter
        // We do not support the disabled page out (DPO) and forced unit access (FUA) bit.
        outbuf[2] = if dev.read_only { 0x80 } else { 0x00 };
        // `idx` tracks the next free byte; pages are appended starting there.
        let mut idx = if !self.disable_block_desc() {
            // Block descriptor length.
            outbuf[3] = 8;
            // outbuf[4]: Density code is 0.
            let sectors = dev.last_lba + 1;
            // Fill in the number of sectors if not bigger than 0xffffff, leave it with 0
            // otherwise.
            if sectors <= 0xffffff {
                outbuf[5..8].copy_from_slice(&(sectors as u32).to_be_bytes()[1..]);
            }
            // outbuf[8]: reserved.
            outbuf[9..12].copy_from_slice(&dev.block_size.to_be_bytes()[1..]);
            12
        } else {
            4
        };

        let page_control = self.page_control()?;
        let page_code = self.page_code();
        let subpage_code = self.subpage_code();
        // The pair of the page code and the subpage code specifies which mode pages and subpages
        // to return. Refer to the Table 99 in the SPC-3 spec for more details:
        // <https://www.t10.org/cgi-bin/ac.pl?t=f&f=spc3r23.pdf>
        match (page_code, subpage_code) {
            // Return all mode pages with subpage 0.
            (0x3f, 0x00) => {
                Self::add_all_page_codes(subpage_code, page_control, &mut outbuf, &mut idx)
            }
            // Return all mode pages with subpages 0x00-0xfe.
            (0x3f, 0xff) => {
                for subpage_code in 0..0xff {
                    Self::add_all_page_codes(subpage_code, page_control, &mut outbuf, &mut idx)
                }
            }
            // subpage_code other than 0x00 or 0xff are reserved.
            (0x3f, _) => return Err(ExecuteError::InvalidField),
            // Return a specific mode page with subpages 0x00-0xfe.
            (_, 0xff) => {
                for subpage_code in 0..0xff {
                    match fill_mode_page(
                        page_code,
                        subpage_code,
                        page_control,
                        &mut outbuf[idx as usize..],
                    ) {
                        Some(n) => idx += n,
                        None => return Err(ExecuteError::InvalidField),
                    };
                }
            }
            // Return a single specific mode page and subpage.
            (_, _) => {
                match fill_mode_page(
                    page_code,
                    subpage_code,
                    page_control,
                    &mut outbuf[idx as usize..],
                ) {
                    Some(n) => idx += n,
                    None => return Err(ExecuteError::InvalidField),
                };
            }
        };
        // Mode data length: number of bytes that follow the length byte itself.
        outbuf[0] = idx - 1;
        writer
            .write_all(&outbuf[..alloc_len])
            .map_err(ExecuteError::Write)
    }

    // Fill in mode pages with a specific subpage_code.
    // Appends every supported page (1..0x3f, then page 0 last) at `*idx`,
    // advancing `*idx` past each page written; unsupported pages are skipped.
    fn add_all_page_codes(
        subpage_code: u8,
        page_control: PageControl,
        outbuf: &mut [u8],
        idx: &mut u8,
    ) {
        for page_code in 1..0x3f {
            if let Some(n) = fill_mode_page(
                page_code,
                subpage_code,
                page_control,
                &mut outbuf[*idx as usize..],
            ) {
                *idx += n;
            }
        }
        // Add mode page 0 after all other mode pages were returned.
        if let Some(n) = fill_mode_page(0, subpage_code, page_control, &mut outbuf[*idx as usize..])
        {
            *idx += n;
        }
    }
}
738
739
/// READ CAPACITY(10) command: 10-byte CDB returning the last LBA (32-bit) and
/// the block size.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct ReadCapacity10 {
    // Operation code (READ_CAPACITY_10).
    opcode: u8,
    _obsolete1: u8,
    _obsolete2: [u8; 4],
    _reserved: [u8; 2],
    _obsolete3: u8,
    control: u8,
}

impl ReadCapacity10 {
    /// Writes the 8-byte READ CAPACITY(10) response: last LBA then block size,
    /// both big-endian.
    fn emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
        // Returned value is the block address of the last sector.
        // If the block address exceeds u32::MAX, we return u32::MAX.
        let block_address: u32 = dev.last_lba.try_into().unwrap_or(u32::MAX);
        let mut outbuf = [0u8; 8];
        outbuf[..4].copy_from_slice(&block_address.to_be_bytes());
        outbuf[4..8].copy_from_slice(&dev.block_size.to_be_bytes());
        writer.write_all(&outbuf).map_err(ExecuteError::Write)
    }
}
773
774
/// READ CAPACITY(16) command (a SERVICE ACTION IN(16) service action):
/// returns the last LBA (64-bit), block size, and provisioning flags.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct ReadCapacity16 {
    // Operation code (SERVICE_ACTION_IN_16).
    opcode: u8,
    // Low five bits hold the service action (READ_CAPACITY_16).
    service_action_field: u8,
    _obsolete: [u8; 8],
    alloc_len_bytes: [u8; 4],
    _reserved: u8,
    control: u8,
}

impl ReadCapacity16 {
    /// Writes the 32-byte READ CAPACITY(16) response.
    fn emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
        let _trace = cros_tracing::trace_event!(VirtioScsi, "READ_CAPACITY(16)");
        let mut outbuf = [0u8; 32];
        // Last logical block address
        outbuf[..8].copy_from_slice(&dev.last_lba.to_be_bytes());
        // Block size
        outbuf[8..12].copy_from_slice(&dev.block_size.to_be_bytes());
        // crosvm implements logical block provisioning management.
        outbuf[14] = 1 << 7;
        writer.write_all(&outbuf).map_err(ExecuteError::Write)
    }
}
810
811
/// READ(10) command: 10-byte CDB with a 32-bit LBA and a 16-bit transfer length.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct Read10 {
    // Operation code (READ_10).
    opcode: u8,
    rdprotect: u8,
    // 32-bit big-endian LBA.
    lba_bytes: [u8; 4],
    group_number: u8,
    // 16-bit big-endian transfer length in blocks.
    xfer_len_bytes: [u8; 2],
    control: u8,
}

impl Read10 {
    /// Transfer length in blocks.
    fn xfer_len(&self) -> usize {
        u16::from_be_bytes(self.xfer_len_bytes) as usize
    }

    /// Decodes the 32-bit big-endian logical block address.
    fn lba(&self) -> u64 {
        u32::from_be_bytes(self.lba_bytes) as u64
    }

    /// Executes READ(10): reads the requested blocks into the guest buffer.
    async fn emulate(
        &self,
        writer: &mut Writer,
        dev: &AsyncLogicalUnit,
    ) -> Result<(), ExecuteError> {
        let xfer_len = self.xfer_len();
        let lba = self.lba();
        let _trace = cros_tracing::trace_event!(VirtioScsi, "READ(10)", lba, xfer_len);
        read_from_disk(writer, dev, xfer_len, lba).await
    }
}
854
855
/// WRITE(10) command: 10-byte CDB with a 32-bit LBA and a 16-bit transfer length.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct Write10 {
    // Operation code (WRITE_10).
    opcode: u8,
    wrprotect: u8,
    // 32-bit big-endian LBA.
    lba_bytes: [u8; 4],
    group_number: u8,
    // 16-bit big-endian transfer length in blocks.
    xfer_len_bytes: [u8; 2],
    control: u8,
}

impl Write10 {
    /// Decodes the 32-bit big-endian logical block address.
    fn lba(&self) -> u64 {
        u32::from_be_bytes(self.lba_bytes) as u64
    }

    /// Transfer length in blocks.
    fn xfer_len(&self) -> usize {
        u16::from_be_bytes(self.xfer_len_bytes) as usize
    }

    /// Executes WRITE(10): writes the guest's data-out payload to the disk.
    async fn emulate(
        &self,
        reader: &mut Reader,
        dev: &AsyncLogicalUnit,
    ) -> Result<(), ExecuteError> {
        let xfer_len = self.xfer_len();
        let lba = self.lba();
        let _trace = cros_tracing::trace_event!(VirtioScsi, "WRITE(10)", lba, xfer_len);
        write_to_disk(reader, dev, xfer_len, lba).await
    }
}
898
899
/// Writes `xfer_blocks` blocks starting at block `lba` to the disk image,
/// consuming the data from `reader` (the guest's data-out buffer).
///
/// Rejects the command with `ExecuteError::ReadOnly` on read-only devices and
/// validates the LBA range before issuing a single positioned write. On I/O
/// failure, reports `ExecuteError::WriteIo` carrying the residual byte count.
async fn write_to_disk(
    reader: &mut Reader,
    dev: &AsyncLogicalUnit,
    xfer_blocks: usize,
    lba: u64,
) -> Result<(), ExecuteError> {
    if dev.read_only {
        return Err(ExecuteError::ReadOnly);
    }
    check_lba_range(dev.last_lba, lba, xfer_blocks)?;
    let block_size = dev.block_size;
    // Total byte count and starting byte offset of the transfer.
    let count = xfer_blocks * block_size as usize;
    let offset = lba * block_size as u64;
    // Snapshot progress so the residual can be computed if the transfer fails partway.
    let before = reader.bytes_read();
    reader
        .read_exact_to_at_fut(&*dev.disk_image, count, offset)
        .await
        .map_err(|desc_error| {
            let resid = count - (reader.bytes_read() - before);
            ExecuteError::WriteIo { resid, desc_error }
        })
}
921
922
/// SYNCHRONIZE CACHE(10) command: flushes the device's write cache to stable
/// storage.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct SynchronizeCache10 {
    // Operation code (SYNCHRONIZE_CACHE_10).
    opcode: u8,
    immed_byte: u8,
    lba_bytes: [u8; 4],
    group_number: u8,
    block_num_bytes: [u8; 2],
    control: u8,
}

impl SynchronizeCache10 {
    /// Flushes the disk image with fdatasync. Rejected on read-only devices;
    /// sync failures are logged and surfaced as `SynchronizationError`.
    async fn emulate(&self, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
        let _trace = cros_tracing::trace_event!(VirtioScsi, "SYNCHRONIZE_CACHE(10)");
        if dev.read_only {
            return Err(ExecuteError::ReadOnly);
        }
        dev.disk_image.fdatasync().await.map_err(|e| {
            warn!("failed to sync: {e}");
            ExecuteError::SynchronizationError
        })
    }
}
957
958
/// Unmaps (deallocates) `nblocks` blocks starting at block `lba` by punching a
/// hole in the disk image. The range is validated, but hole-punch failures are
/// ignored: the device is not strictly required to unmap the LBAs.
async fn unmap(dev: &AsyncLogicalUnit, lba: u64, nblocks: u64) -> Result<(), ExecuteError> {
    check_lba_range(dev.last_lba, lba, nblocks as usize)?;
    let block_size = dev.block_size as u64;
    // Best effort only; errors from punch_hole are deliberately dropped.
    let _ = dev
        .disk_image
        .punch_hole(lba * block_size, nblocks * block_size)
        .await;
    Ok(())
}
966
967
/// Implements the data path of WRITE SAME(10)/(16): replicate one logical
/// block of data-out payload across `nblocks` blocks starting at `lba`.
///
/// Only an all-zero payload is currently supported (handled via
/// `write_zeroes_at`); any other payload is rejected with `InvalidField`.
async fn write_same(
    dev: &AsyncLogicalUnit,
    lba: u64,
    nblocks: u64,
    reader: &mut Reader,
) -> Result<(), ExecuteError> {
    check_lba_range(dev.last_lba, lba, nblocks as usize)?;
    // The WRITE SAME command expects the device to transfer a single logical block from the
    // Data-Out buffer.
    reader.split_at(dev.block_size as usize);
    if reader.get_remaining().iter().all(|s| s.is_all_zero()) {
        let block_size = dev.block_size as u64;
        // Ignore the errors here since the device is not strictly required to unmap the LBAs.
        let _ = dev
            .disk_image
            .write_zeroes_at(lba * block_size, nblocks * block_size)
            .await;
        Ok(())
    } else {
        // TODO(b/309376528): If the specified data is not zero, raise error for now.
        Err(ExecuteError::InvalidField)
    }
}
990
991
/// WRITE SAME(10) command: replicate one block of data (or unmap) across a
/// 32-bit LBA range with a 16-bit block count.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct WriteSame10 {
    // Operation code (WRITE_SAME_10).
    opcode: u8,
    // Packs the WRPROTECT field with the ANCHOR (bit 4) and UNMAP (bit 3) bits.
    wrprotect_anchor_unmap: u8,
    // 32-bit big-endian LBA.
    lba_bytes: [u8; 4],
    group_number_field: u8,
    // 16-bit big-endian number of blocks.
    nblocks_bytes: [u8; 2],
    control: u8,
}

impl WriteSame10 {
    /// Decodes the 32-bit big-endian logical block address.
    fn lba(&self) -> u32 {
        u32::from_be_bytes(self.lba_bytes)
    }

    /// Number of logical blocks to write (or unmap).
    fn nblocks(&self) -> u16 {
        u16::from_be_bytes(self.nblocks_bytes)
    }

    /// Whether the UNMAP bit (bit 3) is set.
    fn unmap(&self) -> bool {
        self.wrprotect_anchor_unmap & 0x8 != 0
    }

    /// Whether the ANCHOR bit (bit 4) is set.
    fn anchor(&self) -> bool {
        self.wrprotect_anchor_unmap & 0x10 != 0
    }

    /// Executes WRITE SAME(10): unmaps the range when UNMAP is set, otherwise
    /// replicates the (all-zero) data-out block across the range.
    async fn emulate(
        &self,
        reader: &mut Reader,
        dev: &AsyncLogicalUnit,
    ) -> Result<(), ExecuteError> {
        let lba = self.lba() as u64;
        let nblocks = self.nblocks() as u64;
        let _trace = cros_tracing::trace_event!(VirtioScsi, "WRITE_SAME(10)", lba, nblocks);
        if dev.read_only {
            return Err(ExecuteError::ReadOnly);
        }
        if nblocks == 0 {
            // crosvm does not allow the number of blocks to be zero.
            return Err(ExecuteError::InvalidField);
        }
        if self.anchor() {
            // crosvm currently does not support anchor operations.
            return Err(ExecuteError::InvalidField);
        }
        if self.unmap() {
            unmap(dev, lba, nblocks).await
        } else {
            write_same(dev, lba, nblocks, reader).await
        }
    }
}
1057
1058
/// UNMAP command: 10-byte CDB followed by a data-out parameter list of block
/// descriptors naming the LBA ranges to deallocate.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct Unmap {
    // Operation code (UNMAP).
    opcode: u8,
    // Bit 0 is the ANCHOR bit.
    anchor_field: u8,
    _reserved: [u8; 4],
    group_number_field: u8,
    // Big-endian length of the parameter list in the data-out buffer.
    param_list_len_bytes: [u8; 2],
    control: u8,
}

impl Unmap {
    /// Whether the ANCHOR bit is set (not supported by crosvm).
    fn anchor(&self) -> bool {
        self.anchor_field & 0x01 != 0
    }

    /// Length of the UNMAP parameter list in bytes.
    fn param_list_len(&self) -> u16 {
        u16::from_be_bytes(self.param_list_len_bytes)
    }

    /// Executes UNMAP: parses the parameter-list header and each 16-byte block
    /// descriptor from `reader`, punching a hole for every described range.
    async fn emulate(
        &self,
        reader: &mut Reader,
        dev: &AsyncLogicalUnit,
    ) -> Result<(), ExecuteError> {
        let _trace = cros_tracing::trace_event!(VirtioScsi, "UNMAP");
        // Reject anchor == 1
        if self.anchor() {
            return Err(ExecuteError::InvalidField);
        }
        if dev.read_only {
            return Err(ExecuteError::ReadOnly);
        }
        // A non-empty parameter list must be at least as long as its 8-byte header.
        let param_list_len = self.param_list_len();
        if 0 < param_list_len && param_list_len < 8 {
            return Err(ExecuteError::InvalidParamLen);
        }
        // unmap data len
        reader.consume(2);
        let unmap_block_descriptors = {
            // Block descriptor data length, in bytes.
            let block_data_len = reader
                .read_obj::<Be16>()
                .map_err(ExecuteError::Read)?
                .to_native();
            // If the data length is not a multiple of 16, the last unmap block should be ignored.
            block_data_len / 16
        };
        // reserved
        reader.consume(4);
        // Each descriptor: 8-byte LBA, 4-byte block count, 4 reserved bytes.
        for _ in 0..unmap_block_descriptors {
            let lba = reader
                .read_obj::<Be64>()
                .map_err(ExecuteError::Read)?
                .to_native();
            let nblocks = reader
                .read_obj::<Be32>()
                .map_err(ExecuteError::Read)?
                .to_native() as u64;
            // reserved
            reader.consume(4);
            unmap(dev, lba, nblocks).await?;
        }
        Ok(())
    }
}
1135
1136
/// WRITE SAME(16) command: replicate one block of data (or unmap) across a
/// 64-bit LBA range with a 32-bit block count.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct WriteSame16 {
    // Operation code (WRITE_SAME_16).
    opcode: u8,
    // Packs the WRPROTECT field with the ANCHOR (bit 4) and UNMAP (bit 3) bits.
    wrprotect_anchor_unmap: u8,
    // 64-bit big-endian LBA.
    lba_bytes: [u8; 8],
    // 32-bit big-endian number of blocks.
    nblocks_bytes: [u8; 4],
    group_number_field: u8,
    control: u8,
}
1158
1159
impl WriteSame16 {
1160
fn lba(&self) -> u64 {
1161
u64::from_be_bytes(self.lba_bytes)
1162
}
1163
1164
fn nblocks(&self) -> u32 {
1165
u32::from_be_bytes(self.nblocks_bytes)
1166
}
1167
1168
fn unmap(&self) -> bool {
1169
self.wrprotect_anchor_unmap & 0x8 != 0
1170
}
1171
1172
fn anchor(&self) -> bool {
1173
self.wrprotect_anchor_unmap & 0x10 != 0
1174
}
1175
1176
async fn emulate(
1177
&self,
1178
reader: &mut Reader,
1179
dev: &AsyncLogicalUnit,
1180
) -> Result<(), ExecuteError> {
1181
let lba = self.lba();
1182
let nblocks = self.nblocks() as u64;
1183
let _trace = cros_tracing::trace_event!(VirtioScsi, "WRITE_SAME(16)", lba, nblocks);
1184
if nblocks == 0 {
1185
// crosvm does not allow the number of blocks to be zero.
1186
return Err(ExecuteError::InvalidField);
1187
}
1188
if self.anchor() {
1189
// crosvm currently does not support anchor operations.
1190
return Err(ExecuteError::InvalidField);
1191
}
1192
if self.unmap() {
1193
unmap(dev, lba, nblocks).await
1194
} else {
1195
write_same(dev, lba, nblocks, reader).await
1196
}
1197
}
1198
}
1199
1200
/// REPORT LUNS command descriptor block.
///
/// Field layout follows the 12-byte CDB; the allocation length is stored as a
/// big-endian byte array and decoded by `ReportLuns::alloc_len`.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct ReportLuns {
    opcode: u8,
    _reserved: u8,
    // Not consulted by `emulate`: only LUN0 is reported regardless.
    select_report: u8,
    _reserved2: [u8; 3],
    // Allocation length, big-endian.
    alloc_len_bytes: [u8; 4],
    _reserved3: u8,
    control: u8,
}
1223
1224
impl ReportLuns {
1225
fn alloc_len(&self) -> usize {
1226
u32::from_be_bytes(self.alloc_len_bytes) as usize
1227
}
1228
1229
fn emulate(&self, writer: &mut Writer) -> Result<(), ExecuteError> {
1230
let _trace = cros_tracing::trace_event!(VirtioScsi, "REPORT_LUNS");
1231
// We need at least 16 bytes.
1232
if self.alloc_len() < 16 {
1233
return Err(ExecuteError::InvalidField);
1234
}
1235
// Each LUN takes 8 bytes and we only support LUN0.
1236
let lun_list_len = 8u32;
1237
writer
1238
.write_all(&lun_list_len.to_be_bytes())
1239
.map_err(ExecuteError::Write)?;
1240
let reserved = [0; 4];
1241
writer.write_all(&reserved).map_err(ExecuteError::Write)?;
1242
let lun0 = 0u64;
1243
writer
1244
.write_all(&lun0.to_be_bytes())
1245
.map_err(ExecuteError::Write)
1246
}
1247
}
1248
1249
/// REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS command descriptor block.
///
/// Field layout follows the 12-byte CDB; the allocation length is stored as a
/// big-endian byte array and decoded by `ReportSupportedTMFs::alloc_len`.
#[derive(
    Copy,
    Clone,
    Debug,
    Default,
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    PartialEq,
    Eq,
    Unaligned,
)]
#[repr(C, packed)]
pub struct ReportSupportedTMFs {
    opcode: u8,
    // Not consulted by `emulate`; the opcode/service-action dispatch happens
    // before this struct is parsed.
    service_action_field: u8,
    _reserved1: [u8; 4],
    // Allocation length, big-endian.
    alloc_len_bytes: [u8; 4],
    _reserved2: u8,
    control: u8,
}
1271
1272
impl ReportSupportedTMFs {
1273
fn alloc_len(&self) -> u32 {
1274
u32::from_be_bytes(self.alloc_len_bytes)
1275
}
1276
1277
fn emulate(&self, writer: &mut Writer) -> Result<(), ExecuteError> {
1278
let _trace = cros_tracing::trace_event!(VirtioScsi, "REPORT_SUPPORTED_TMFs");
1279
// The allocation length should be at least four.
1280
if self.alloc_len() < 4 {
1281
return Err(ExecuteError::InvalidField);
1282
}
1283
// We support LOGICAL UNIT RESET and TARGET RESET.
1284
const LOGICAL_UNIT_RESET: u8 = 1 << 3;
1285
const TARGET_RESET: u8 = 1 << 1;
1286
writer
1287
.write_obj(LOGICAL_UNIT_RESET | TARGET_RESET)
1288
.map_err(ExecuteError::Write)?;
1289
// Push reserved bytes.
1290
let reserved = [0u8; 3];
1291
writer.write_all(&reserved).map_err(ExecuteError::Write)?;
1292
Ok(())
1293
}
1294
}
1295
1296
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parse_test_unit_ready() {
        let cdb = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00];
        let cmd = parse_cdb::<TestUnitReady>(&cdb).unwrap();
        assert_eq!(cmd.opcode, TEST_UNIT_READY);
        assert_eq!(cmd.reserved, [0; 4]);
        assert_eq!(cmd.control, 0);
    }

    #[test]
    fn parse_read6() {
        let cdb = [0x08, 0xab, 0xcd, 0xef, 0x00, 0x00];
        let cmd = parse_cdb::<Read6>(&cdb).unwrap();
        // A transfer length byte of zero means 256 blocks in READ(6).
        assert_eq!(cmd.xfer_len(), 256);
        // The high three bits of byte 1 (0xab -> 0x0b) are masked out of the LBA.
        assert_eq!(cmd.lba(), 0x0bcdef);
    }

    #[test]
    fn parse_inquiry() {
        let cdb = [0x12, 0x01, 0x00, 0x00, 0x40, 0x00];
        let cmd = parse_cdb::<Inquiry>(&cdb).unwrap();
        assert!(cmd.vital_product_data_enabled());
        assert_eq!(cmd.alloc_len(), 0x0040);
        assert_eq!(cmd.page_code(), 0x00);
    }

    #[test]
    fn parse_mode_sense_6() {
        let cdb = [0x1a, 0x00, 0xa8, 0x00, 0x04, 0x00];
        let cmd = parse_cdb::<ModeSense6>(&cdb).unwrap();
        assert_eq!(cmd.alloc_len(), 0x04);
        // Byte 2 is PC (bits 7-6) | page code (bits 5-0): 0xa8 -> page 0x28.
        assert_eq!(cmd.page_code(), 0x28);
        assert_eq!(cmd.page_control().unwrap(), PageControl::Default);
    }

    #[test]
    fn parse_read_capacity_10() {
        let cdb = [0x25, 0x00, 0xab, 0xcd, 0xef, 0x01, 0x00, 0x00, 0x9, 0x0];
        // Only checks that the CDB parses successfully.
        let _parsed = parse_cdb::<ReadCapacity10>(&cdb).unwrap();
    }

    #[test]
    fn parse_read10() {
        let cdb = [0x28, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00];
        let cmd = parse_cdb::<Read10>(&cdb).unwrap();
        assert_eq!(cmd.xfer_len(), 0x0008);
        assert_eq!(cmd.lba(), 0x003c0000);
    }

    #[test]
    fn parse_write10() {
        let cdb = [0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00];
        let cmd = parse_cdb::<Write10>(&cdb).unwrap();
        assert_eq!(cmd.xfer_len(), 0x0008);
        assert_eq!(cmd.lba(), 0x00000000);
    }

    #[test]
    fn parse_synchronize_cache_10() {
        let cdb = [0x35, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00];
        let cmd = parse_cdb::<SynchronizeCache10>(&cdb).unwrap();
        assert_eq!(cmd.opcode, SYNCHRONIZE_CACHE_10);
        assert_eq!(cmd.immed_byte, 0);
        assert_eq!(cmd.lba_bytes, [0x00, 0x00, 0x00, 0x00]);
        assert_eq!(cmd.group_number, 0x00);
        assert_eq!(cmd.block_num_bytes, [0x00, 0x00]);
        assert_eq!(cmd.control, 0x00);
    }

    #[test]
    fn parse_report_luns() {
        let cdb = [
            0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0xcd, 0xef, 0x12, 0x00, 0x00,
        ];
        let cmd = parse_cdb::<ReportLuns>(&cdb).unwrap();
        assert_eq!(cmd.alloc_len(), 0xabcdef12);
    }

    #[test]
    fn parse_report_supported_tmfs() {
        let cdb = [
            0xa3, 0x0d, 0x00, 0x00, 0x00, 0x00, 0xab, 0xcd, 0xef, 0x12, 0x00, 0x00,
        ];
        let cmd = parse_cdb::<ReportSupportedTMFs>(&cdb).unwrap();
        assert_eq!(cmd.alloc_len(), 0xabcdef12);
    }
}
1387
1388