Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/gpu/nova-core/gsp/cmdq.rs
38186 views
1
// SPDX-License-Identifier: GPL-2.0
2
3
use core::{
4
cmp,
5
mem,
6
sync::atomic::{
7
fence,
8
Ordering, //
9
}, //
10
};
11
12
use kernel::{
13
device,
14
dma::{
15
CoherentAllocation,
16
DmaAddress, //
17
},
18
dma_write,
19
io::poll::read_poll_timeout,
20
prelude::*,
21
sync::aref::ARef,
22
time::Delta,
23
transmute::{
24
AsBytes,
25
FromBytes, //
26
},
27
};
28
29
use crate::{
30
driver::Bar0,
31
gsp::{
32
fw::{
33
GspMsgElement,
34
MsgFunction,
35
MsgqRxHeader,
36
MsgqTxHeader, //
37
},
38
PteArray,
39
GSP_PAGE_SHIFT,
40
GSP_PAGE_SIZE, //
41
},
42
num,
43
regs,
44
sbuffer::SBufferIter, //
45
};
46
47
/// Trait implemented by types representing a command to send to the GSP.
///
/// The main purpose of this trait is to provide [`Cmdq::send_command`] with the information it
/// needs to send a given command.
///
/// [`CommandToGsp::init`] in particular is responsible for initializing the command directly
/// into the space reserved for it in the command queue buffer.
///
/// Some commands may be followed by a variable-length payload. For these, the
/// [`CommandToGsp::variable_payload_len`] and [`CommandToGsp::init_variable_payload`] need to be
/// defined as well.
pub(crate) trait CommandToGsp {
    /// Function identifying this command to the GSP (written into the message header).
    const FUNCTION: MsgFunction;

    /// Type generated by [`CommandToGsp::init`], to be written into the command queue buffer.
    type Command: FromBytes + AsBytes;

    /// Error type returned by [`CommandToGsp::init`].
    type InitError;

    /// In-place command initializer responsible for filling the command in the command queue
    /// buffer.
    fn init(&self) -> impl Init<Self::Command, Self::InitError>;

    /// Size of the variable-length payload following the command structure generated by
    /// [`CommandToGsp::init`].
    ///
    /// Most commands don't have a variable-length payload, so this is zero by default.
    fn variable_payload_len(&self) -> usize {
        0
    }

    /// Method initializing the variable-length payload.
    ///
    /// The command buffer is circular, which means that we may need to jump back to its beginning
    /// while in the middle of a command. For this reason, the variable-length payload is
    /// initialized using a [`SBufferIter`] over (at most) two discontiguous slices.
    ///
    /// This method will receive a buffer of the length returned by
    /// [`CommandToGsp::variable_payload_len`], and must write every single byte of it. Leaving
    /// unwritten space will lead to an error.
    ///
    /// Most commands don't have a variable-length payload, so this does nothing by default.
    fn init_variable_payload(
        &self,
        _dst: &mut SBufferIter<core::array::IntoIter<&mut [u8], 2>>,
    ) -> Result {
        Ok(())
    }
}
98
99
/// Trait representing messages received from the GSP.
///
/// This trait tells [`Cmdq::receive_msg`] how it can receive a given type of message.
pub(crate) trait MessageFromGsp: Sized {
    /// Function identifying this message from the GSP.
    const FUNCTION: MsgFunction;

    /// Error type returned by [`MessageFromGsp::read`].
    type InitError;

    /// Type containing the raw message to be read from the message queue.
    type Message: FromBytes;

    /// Method reading the message from the message queue and returning it.
    ///
    /// From a `Self::Message` and a [`SBufferIter`], constructs an instance of `Self` and returns
    /// it.
    ///
    /// `sbuffer` iterates over the bytes following the raw message, i.e. its variable-length
    /// payload, possibly split in two because the queue is circular.
    fn read(
        msg: &Self::Message,
        sbuffer: &mut SBufferIter<core::array::IntoIter<&[u8], 2>>,
    ) -> Result<Self, Self::InitError>;
}
121
122
/// Number of GSP pages making the [`Msgq`].
///
/// Note that this value (0x3f == 63) is deliberately not a power of two, so ring indices must be
/// wrapped using `%` and not a bit mask.
pub(crate) const MSGQ_NUM_PAGES: u32 = 0x3f;
124
125
/// Circular buffer of a [`Msgq`].
///
/// This area of memory is to be shared between the driver and the GSP to exchange commands or
/// messages.
#[repr(C, align(0x1000))]
#[derive(Debug)]
struct MsgqData {
    /// Backing storage of the ring: one GSP page per queue element.
    data: [[u8; GSP_PAGE_SIZE]; num::u32_as_usize(MSGQ_NUM_PAGES)],
}

// Annoyingly we are forced to use a literal to specify the alignment of
// `MsgqData`, so check that it corresponds to the actual GSP page size here.
static_assert!(align_of::<MsgqData>() == GSP_PAGE_SIZE);
138
139
/// Unidirectional message queue.
///
/// Contains the data for a message queue, that either the driver or GSP writes to.
///
/// Note that while the write pointer of `tx` corresponds to the `msgq` of the same instance, the
/// read pointer of `rx` actually refers to the `Msgq` owned by the other side.
/// This design ensures that only the driver or GSP ever writes to a given instance of this struct.
#[repr(C)]
// There is no struct defined for this in the open-gpu-kernel-source headers.
// Instead it is defined by code in `GspMsgQueuesInit()`.
struct Msgq {
    /// Header for sending messages, including the write pointer.
    tx: MsgqTxHeader,
    /// Header for receiving messages, including the read pointer (into the *other* side's queue,
    /// see the struct-level comment).
    rx: MsgqRxHeader,
    /// The message queue proper.
    msgq: MsgqData,
}
157
158
/// Structure shared between the driver and the GSP and containing the command and message queues.
#[repr(C)]
struct GspMem {
    /// Self-mapping page table entries (one GSP page worth of `u64` entries).
    ptes: PteArray<{ GSP_PAGE_SIZE / size_of::<u64>() }>,
    /// CPU queue: the driver writes commands here, and the GSP reads them. It also contains the
    /// write and read pointers that the CPU updates.
    ///
    /// This member is read-only for the GSP.
    cpuq: Msgq,
    /// GSP queue: the GSP writes messages here, and the driver reads them. It also contains the
    /// write and read pointers that the GSP updates.
    ///
    /// This member is read-only for the driver.
    gspq: Msgq,
}
174
175
// `GspMem` is exchanged with the GSP as raw bytes, hence the marker impls below.
//
// SAFETY: These structs don't meet the no-padding requirements of AsBytes but
// that is not a problem because they are not used outside the kernel.
unsafe impl AsBytes for GspMem {}

// SAFETY: These structs don't meet the no-padding requirements of FromBytes but
// that is not a problem because they are not used outside the kernel.
unsafe impl FromBytes for GspMem {}
182
183
/// Wrapper around [`GspMem`] to share it with the GPU using a [`CoherentAllocation`].
///
/// This provides the low-level functionality to communicate with the GSP, including allocation of
/// queue space to write messages to and management of read/write pointers.
///
/// This is shared with the GSP, with clear ownership rules regarding the command queues:
///
/// * The driver owns (i.e. can write to) the part of the CPU message queue between the CPU write
///   pointer and the GSP read pointer. This region is returned by [`Self::driver_write_area`].
/// * The driver owns (i.e. can read from) the part of the GSP message queue between the CPU read
///   pointer and the GSP write pointer. This region is returned by [`Self::driver_read_area`].
struct DmaGspMem(CoherentAllocation<GspMem>);
195
196
impl DmaGspMem {
197
    /// Allocate a new instance and map it for `dev`.
    ///
    /// The allocation is zero-initialized (`__GFP_ZERO`); only the self-mapping PTEs and the
    /// driver-owned `cpuq` headers are written here, through `dma_write!`.
    fn new(dev: &device::Device<device::Bound>) -> Result<Self> {
        // Values baked into the CPU TX header: total queue size and the offset of the RX header
        // within a `Msgq`.
        const MSGQ_SIZE: u32 = num::usize_into_u32::<{ size_of::<Msgq>() }>();
        const RX_HDR_OFF: u32 = num::usize_into_u32::<{ mem::offset_of!(Msgq, rx) }>();

        let gsp_mem =
            CoherentAllocation::<GspMem>::alloc_coherent(dev, 1, GFP_KERNEL | __GFP_ZERO)?;
        dma_write!(gsp_mem[0].ptes = PteArray::new(gsp_mem.dma_handle())?)?;
        dma_write!(gsp_mem[0].cpuq.tx = MsgqTxHeader::new(MSGQ_SIZE, RX_HDR_OFF, MSGQ_NUM_PAGES))?;
        dma_write!(gsp_mem[0].cpuq.rx = MsgqRxHeader::new())?;

        Ok(Self(gsp_mem))
    }
210
211
/// Returns the region of the CPU message queue that the driver is currently allowed to write
212
/// to.
213
///
214
/// As the message queue is a circular buffer, the region may be discontiguous in memory. In
215
/// that case the second slice will have a non-zero length.
216
fn driver_write_area(&mut self) -> (&mut [[u8; GSP_PAGE_SIZE]], &mut [[u8; GSP_PAGE_SIZE]]) {
217
let tx = self.cpu_write_ptr() as usize;
218
let rx = self.gsp_read_ptr() as usize;
219
220
// SAFETY:
221
// - The `CoherentAllocation` contains exactly one object.
222
// - We will only access the driver-owned part of the shared memory.
223
// - Per the safety statement of the function, no concurrent access will be performed.
224
let gsp_mem = &mut unsafe { self.0.as_slice_mut(0, 1) }.unwrap()[0];
225
// PANIC: per the invariant of `cpu_write_ptr`, `tx` is `<= MSGQ_NUM_PAGES`.
226
let (before_tx, after_tx) = gsp_mem.cpuq.msgq.data.split_at_mut(tx);
227
228
if rx <= tx {
229
// The area from `tx` up to the end of the ring, and from the beginning of the ring up
230
// to `rx`, minus one unit, belongs to the driver.
231
if rx == 0 {
232
let last = after_tx.len() - 1;
233
(&mut after_tx[..last], &mut before_tx[0..0])
234
} else {
235
(after_tx, &mut before_tx[..rx])
236
}
237
} else {
238
// The area from `tx` to `rx`, minus one unit, belongs to the driver.
239
//
240
// PANIC: per the invariants of `cpu_write_ptr` and `gsp_read_ptr`, `rx` and `tx` are
241
// `<= MSGQ_NUM_PAGES`, and the test above ensured that `rx > tx`.
242
(after_tx.split_at_mut(rx - tx).0, &mut before_tx[0..0])
243
}
244
}
245
246
/// Returns the region of the GSP message queue that the driver is currently allowed to read
247
/// from.
248
///
249
/// As the message queue is a circular buffer, the region may be discontiguous in memory. In
250
/// that case the second slice will have a non-zero length.
251
fn driver_read_area(&self) -> (&[[u8; GSP_PAGE_SIZE]], &[[u8; GSP_PAGE_SIZE]]) {
252
let tx = self.gsp_write_ptr() as usize;
253
let rx = self.cpu_read_ptr() as usize;
254
255
// SAFETY:
256
// - The `CoherentAllocation` contains exactly one object.
257
// - We will only access the driver-owned part of the shared memory.
258
// - Per the safety statement of the function, no concurrent access will be performed.
259
let gsp_mem = &unsafe { self.0.as_slice(0, 1) }.unwrap()[0];
260
// PANIC: per the invariant of `cpu_read_ptr`, `xx` is `<= MSGQ_NUM_PAGES`.
261
let (before_rx, after_rx) = gsp_mem.gspq.msgq.data.split_at(rx);
262
263
match tx.cmp(&rx) {
264
cmp::Ordering::Equal => (&after_rx[0..0], &after_rx[0..0]),
265
cmp::Ordering::Greater => (&after_rx[..tx], &before_rx[0..0]),
266
cmp::Ordering::Less => (after_rx, &before_rx[..tx]),
267
}
268
}
269
270
    /// Allocates a region on the command queue that is large enough to send a command of `size`
    /// bytes.
    ///
    /// This returns a [`GspCommand`] ready to be written to by the caller.
    ///
    /// # Errors
    ///
    /// - `EAGAIN` if the driver area is too small to hold the requested command.
    /// - `EIO` if the command header is not properly aligned.
    fn allocate_command(&mut self, size: usize) -> Result<GspCommand<'_>> {
        // Get the current writable area as an array of bytes.
        let (slice_1, slice_2) = {
            let (slice_1, slice_2) = self.driver_write_area();

            #[allow(clippy::incompatible_msrv)]
            (slice_1.as_flattened_mut(), slice_2.as_flattened_mut())
        };

        // If the GSP is still processing previous messages the shared region
        // may be full in which case we will have to retry once the GSP has
        // processed the existing commands.
        if size_of::<GspMsgElement>() + size > slice_1.len() + slice_2.len() {
            return Err(EAGAIN);
        }

        // Extract area for the `GspMsgElement`. The header never wraps: it always comes from the
        // first (page-aligned) slice.
        let (header, slice_1) = GspMsgElement::from_bytes_mut_prefix(slice_1).ok_or(EIO)?;

        // Create the contents area.
        let (slice_1, slice_2) = if slice_1.len() > size {
            // Contents fits entirely in `slice_1`.
            (&mut slice_1[..size], &mut slice_2[0..0])
        } else {
            // Need all of `slice_1` and some of `slice_2`.
            let slice_2_len = size - slice_1.len();
            (slice_1, &mut slice_2[..slice_2_len])
        };

        Ok(GspCommand {
            header,
            contents: (slice_1, slice_2),
        })
    }
313
314
    // Returns the index of the memory page the GSP will write the next message to.
    //
    // # Invariants
    //
    // - The returned value is between `0` and `MSGQ_NUM_PAGES`.
    fn gsp_write_ptr(&self) -> u32 {
        let gsp_mem = self.0.start_ptr();

        // SAFETY:
        // - The `CoherentAllocation` contains at least one object.
        // - By the invariants of `CoherentAllocation` the pointer is valid.
        //
        // The `%` keeps the invariant even if the GSP publishes an out-of-range value.
        (unsafe { (*gsp_mem).gspq.tx.write_ptr() } % MSGQ_NUM_PAGES)
    }
327
328
    // Returns the index of the memory page the GSP will read the next command from.
    //
    // # Invariants
    //
    // - The returned value is between `0` and `MSGQ_NUM_PAGES`.
    fn gsp_read_ptr(&self) -> u32 {
        let gsp_mem = self.0.start_ptr();

        // SAFETY:
        // - The `CoherentAllocation` contains at least one object.
        // - By the invariants of `CoherentAllocation` the pointer is valid.
        //
        // The `%` keeps the invariant even if the GSP publishes an out-of-range value.
        (unsafe { (*gsp_mem).gspq.rx.read_ptr() } % MSGQ_NUM_PAGES)
    }
341
342
    // Returns the index of the memory page the CPU can read the next message from.
    //
    // # Invariants
    //
    // - The returned value is between `0` and `MSGQ_NUM_PAGES`.
    fn cpu_read_ptr(&self) -> u32 {
        let gsp_mem = self.0.start_ptr();

        // SAFETY:
        // - The `CoherentAllocation` contains at least one object.
        // - By the invariants of `CoherentAllocation` the pointer is valid.
        (unsafe { (*gsp_mem).cpuq.rx.read_ptr() } % MSGQ_NUM_PAGES)
    }
355
356
    // Informs the GSP that it can send `elem_count` new pages into the message queue.
    fn advance_cpu_read_ptr(&mut self, elem_count: u32) {
        let rptr = self.cpu_read_ptr().wrapping_add(elem_count) % MSGQ_NUM_PAGES;

        // Ensure read pointer is properly ordered: all our reads of the message data must be
        // complete before the new read pointer becomes visible and the pages can be reused.
        fence(Ordering::SeqCst);

        let gsp_mem = self.0.start_ptr_mut();

        // SAFETY:
        // - The `CoherentAllocation` contains at least one object.
        // - By the invariants of `CoherentAllocation` the pointer is valid.
        unsafe { (*gsp_mem).cpuq.rx.set_read_ptr(rptr) };
    }
370
371
    // Returns the index of the memory page the CPU can write the next command to.
    //
    // # Invariants
    //
    // - The returned value is between `0` and `MSGQ_NUM_PAGES`.
    fn cpu_write_ptr(&self) -> u32 {
        let gsp_mem = self.0.start_ptr();

        // SAFETY:
        // - The `CoherentAllocation` contains at least one object.
        // - By the invariants of `CoherentAllocation` the pointer is valid.
        (unsafe { (*gsp_mem).cpuq.tx.write_ptr() } % MSGQ_NUM_PAGES)
    }
384
385
// Informs the GSP that it can process `elem_count` new pages from the command queue.
386
fn advance_cpu_write_ptr(&mut self, elem_count: u32) {
387
let wptr = self.cpu_write_ptr().wrapping_add(elem_count) & MSGQ_NUM_PAGES;
388
let gsp_mem = self.0.start_ptr_mut();
389
390
// SAFETY:
391
// - The 'CoherentAllocation' contains at least one object.
392
// - By the invariants of `CoherentAllocation` the pointer is valid.
393
unsafe { (*gsp_mem).cpuq.tx.set_write_ptr(wptr) };
394
395
// Ensure all command data is visible before triggering the GSP read.
396
fence(Ordering::SeqCst);
397
}
398
}
399
400
/// A command ready to be sent on the command queue.
///
/// This is the type returned by [`DmaGspMem::allocate_command`].
struct GspCommand<'a> {
    // Writable reference to the header of the command.
    header: &'a mut GspMsgElement,
    // Writable slices to the contents of the command. The second slice is empty unless the
    // command wraps around the end of the circular command queue.
    contents: (&'a mut [u8], &'a mut [u8]),
}
410
411
/// A message ready to be processed from the message queue.
///
/// This is the type returned by [`Cmdq::wait_for_msg`].
struct GspMessage<'a> {
    // Reference to the header of the message.
    header: &'a GspMsgElement,
    // Slices to the contents of the message. The second slice is empty unless the message wraps
    // around the end of the circular message queue.
    contents: (&'a [u8], &'a [u8]),
}
421
422
/// GSP command queue.
///
/// Provides the ability to send commands and receive messages from the GSP using a shared memory
/// area.
pub(crate) struct Cmdq {
    /// Device this command queue belongs to.
    dev: ARef<device::Device>,
    /// Current command sequence number, incremented for every command sent.
    seq: u32,
    /// Memory area shared with the GSP for communicating commands and messages.
    gsp_mem: DmaGspMem,
}
434
435
impl Cmdq {
436
    /// Offset of the data after the PTEs.
    const POST_PTE_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq);

    /// Offset of command queue ring buffer, relative to the end of the PTE area.
    pub(crate) const CMDQ_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq)
        + core::mem::offset_of!(Msgq, msgq)
        - Self::POST_PTE_OFFSET;

    /// Offset of message queue ring buffer, relative to the end of the PTE area.
    pub(crate) const STATQ_OFFSET: usize = core::mem::offset_of!(GspMem, gspq)
        + core::mem::offset_of!(Msgq, msgq)
        - Self::POST_PTE_OFFSET;

    /// Number of page table entries for the GSP shared region (one per GSP page of [`GspMem`]).
    pub(crate) const NUM_PTES: usize = size_of::<GspMem>() >> GSP_PAGE_SHIFT;
451
452
/// Creates a new command queue for `dev`.
453
pub(crate) fn new(dev: &device::Device<device::Bound>) -> Result<Cmdq> {
454
let gsp_mem = DmaGspMem::new(dev)?;
455
456
Ok(Cmdq {
457
dev: dev.into(),
458
seq: 0,
459
gsp_mem,
460
})
461
}
462
463
/// Computes the checksum for the message pointed to by `it`.
464
///
465
/// A message is made of several parts, so `it` is an iterator over byte slices representing
466
/// these parts.
467
fn calculate_checksum<T: Iterator<Item = u8>>(it: T) -> u32 {
468
let sum64 = it
469
.enumerate()
470
.map(|(idx, byte)| (((idx % 8) * 8) as u32, byte))
471
.fold(0, |acc, (rol, byte)| acc ^ u64::from(byte).rotate_left(rol));
472
473
((sum64 >> 32) as u32) ^ (sum64 as u32)
474
}
475
476
    /// Notifies the GSP that we have updated the command queue pointers.
    ///
    /// This writes `0` to the `NV_PGSP_QUEUE_HEAD` register — NOTE(review): presumably `0`
    /// addresses the main command queue; confirm against the register documentation.
    fn notify_gsp(bar: &Bar0) {
        regs::NV_PGSP_QUEUE_HEAD::default()
            .set_address(0)
            .write(bar);
    }
482
483
    /// Sends `command` to the GSP.
    ///
    /// # Errors
    ///
    /// - `EAGAIN` if there was not enough space in the command queue to send the command.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command initializers are propagated as-is.
    pub(crate) fn send_command<M>(&mut self, bar: &Bar0, command: M) -> Result
    where
        M: CommandToGsp,
        // This allows all error types, including `Infallible`, to be used for `M::InitError`.
        Error: From<M::InitError>,
    {
        // Total size: fixed command structure plus optional variable-length payload.
        let command_size = size_of::<M::Command>() + command.variable_payload_len();
        let dst = self.gsp_mem.allocate_command(command_size)?;

        // Extract area for the command itself.
        let (cmd, payload_1) = M::Command::from_bytes_mut_prefix(dst.contents.0).ok_or(EIO)?;

        // Fill the header and command in-place.
        let msg_element = GspMsgElement::init(self.seq, command_size, M::FUNCTION);
        // SAFETY: `msg_header` and `cmd` are valid references, and not touched if the initializer
        // fails.
        unsafe {
            msg_element.__init(core::ptr::from_mut(dst.header))?;
            command.init().__init(core::ptr::from_mut(cmd))?;
        }

        // Fill the variable-length payload, possibly wrapping into the second slice.
        if command_size > size_of::<M::Command>() {
            let mut sbuffer =
                SBufferIter::new_writer([&mut payload_1[..], &mut dst.contents.1[..]]);
            command.init_variable_payload(&mut sbuffer)?;

            // The initializer must consume the whole payload area it requested.
            if !sbuffer.is_empty() {
                return Err(EIO);
            }
        }

        // Compute checksum now that the whole message is ready.
        dst.header
            .set_checksum(Cmdq::calculate_checksum(SBufferIter::new_reader([
                dst.header.as_bytes(),
                dst.contents.0,
                dst.contents.1,
            ])));

        dev_dbg!(
            &self.dev,
            "GSP RPC: send: seq# {}, function={}, length=0x{:x}\n",
            self.seq,
            M::FUNCTION,
            dst.header.length(),
        );

        // All set - update the write pointer and inform the GSP of the new command.
        // `element_count` is the number of queue pages the message occupies, as recorded in its
        // header.
        let elem_count = dst.header.element_count();
        self.seq += 1;
        self.gsp_mem.advance_cpu_write_ptr(elem_count);
        Cmdq::notify_gsp(bar);

        Ok(())
    }
548
549
    /// Wait for a message to become available on the message queue.
    ///
    /// This works purely at the transport layer and does not interpret or validate the message
    /// beyond the advertised length in its [`GspMsgElement`].
    ///
    /// This method returns:
    ///
    /// - A reference to the [`GspMsgElement`] of the message,
    /// - Two byte slices with the contents of the message. The second slice is empty unless the
    ///   message loops across the message queue.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
    /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised) on the
    ///   message queue.
    ///
    /// Error codes returned by the message constructor are propagated as-is.
    fn wait_for_msg(&self, timeout: Delta) -> Result<GspMessage<'_>> {
        // Wait for a message to arrive from the GSP, polling every millisecond.
        let (slice_1, slice_2) = read_poll_timeout(
            || Ok(self.gsp_mem.driver_read_area()),
            |driver_area| !driver_area.0.is_empty(),
            Delta::from_millis(1),
            timeout,
        )
        .map(|(slice_1, slice_2)| {
            #[allow(clippy::incompatible_msrv)]
            (slice_1.as_flattened(), slice_2.as_flattened())
        })?;

        // Extract the `GspMsgElement`.
        let (header, slice_1) = GspMsgElement::from_bytes_prefix(slice_1).ok_or(EIO)?;

        dev_dbg!(
            self.dev,
            "GSP RPC: receive: seq# {}, function={:?}, length=0x{:x}\n",
            header.sequence(),
            header.function(),
            header.length(),
        );

        // Check that the driver read area is large enough for the message.
        if slice_1.len() + slice_2.len() < header.length() {
            return Err(EIO);
        }

        // Cut the message slices down to the actual length of the message.
        let (slice_1, slice_2) = if slice_1.len() > header.length() {
            // PANIC: we checked above that `slice_1` is at least as long as `msg_header.length()`.
            (slice_1.split_at(header.length()).0, &slice_2[0..0])
        } else {
            (
                slice_1,
                // PANIC: we checked above that `slice_1.len() + slice_2.len()` is at least as
                // large as `msg_header.length()`.
                slice_2.split_at(header.length() - slice_1.len()).0,
            )
        };

        // Validate checksum: the checksum of header plus contents (which includes the stored
        // checksum field) must fold to zero.
        if Cmdq::calculate_checksum(SBufferIter::new_reader([
            header.as_bytes(),
            slice_1,
            slice_2,
        ])) != 0
        {
            dev_err!(
                self.dev,
                "GSP RPC: receive: Call {} - bad checksum",
                header.sequence()
            );
            return Err(EIO);
        }

        Ok(GspMessage {
            header,
            contents: (slice_1, slice_2),
        })
    }
629
630
/// Receive a message from the GSP.
631
///
632
/// `init` is a closure tasked with processing the message. It receives a reference to the
633
/// message in the message queue, and a [`SBufferIter`] pointing to its variable-length
634
/// payload, if any.
635
///
636
/// The expected message is specified using the `M` generic parameter. If the pending message
637
/// is different, `EAGAIN` is returned and the unexpected message is dropped.
638
///
639
/// This design is by no means final, but it is simple and will let us go through GSP
640
/// initialization.
641
///
642
/// # Errors
643
///
644
/// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
645
/// - `EIO` if there was some inconsistency (e.g. message shorter than advertised) on the
646
/// message queue.
647
/// - `EINVAL` if the function of the message was unrecognized.
648
pub(crate) fn receive_msg<M: MessageFromGsp>(&mut self, timeout: Delta) -> Result<M>
649
where
650
// This allows all error types, including `Infallible`, to be used for `M::InitError`.
651
Error: From<M::InitError>,
652
{
653
let message = self.wait_for_msg(timeout)?;
654
let function = message.header.function().map_err(|_| EINVAL)?;
655
656
// Extract the message. Store the result as we want to advance the read pointer even in
657
// case of failure.
658
let result = if function == M::FUNCTION {
659
let (cmd, contents_1) = M::Message::from_bytes_prefix(message.contents.0).ok_or(EIO)?;
660
let mut sbuffer = SBufferIter::new_reader([contents_1, message.contents.1]);
661
662
M::read(cmd, &mut sbuffer).map_err(|e| e.into())
663
} else {
664
Err(ERANGE)
665
};
666
667
// Advance the read pointer past this message.
668
self.gsp_mem.advance_cpu_read_ptr(u32::try_from(
669
message.header.length().div_ceil(GSP_PAGE_SIZE),
670
)?);
671
672
result
673
}
674
675
    /// Returns the DMA handle of the command queue's shared memory region.
    ///
    /// This is the bus address of the shared [`GspMem`] allocation.
    pub(crate) fn dma_handle(&self) -> DmaAddress {
        self.gsp_mem.0.dma_handle()
    }
679
}
680
681