// SPDX-License-Identifier: GPL-2.0-only

/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/devcoredump.h>
#include <linux/firmware.h>
#include <linux/limits.h>
#include <linux/mhi.h>
#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include "sahara.h"

#define SAHARA_HELLO_CMD		0x1  /* Min protocol version 1.0 */
#define SAHARA_HELLO_RESP_CMD		0x2  /* Min protocol version 1.0 */
#define SAHARA_READ_DATA_CMD		0x3  /* Min protocol version 1.0 */
#define SAHARA_END_OF_IMAGE_CMD		0x4  /* Min protocol version 1.0 */
#define SAHARA_DONE_CMD			0x5  /* Min protocol version 1.0 */
#define SAHARA_DONE_RESP_CMD		0x6  /* Min protocol version 1.0 */
#define SAHARA_RESET_CMD		0x7  /* Min protocol version 1.0 */
#define SAHARA_RESET_RESP_CMD		0x8  /* Min protocol version 1.0 */
#define SAHARA_MEM_DEBUG_CMD		0x9  /* Min protocol version 2.0 */
#define SAHARA_MEM_READ_CMD		0xa  /* Min protocol version 2.0 */
#define SAHARA_CMD_READY_CMD		0xb  /* Min protocol version 2.1 */
#define SAHARA_SWITCH_MODE_CMD		0xc  /* Min protocol version 2.1 */
#define SAHARA_EXECUTE_CMD		0xd  /* Min protocol version 2.1 */
#define SAHARA_EXECUTE_RESP_CMD		0xe  /* Min protocol version 2.1 */
#define SAHARA_EXECUTE_DATA_CMD		0xf  /* Min protocol version 2.1 */
#define SAHARA_MEM_DEBUG64_CMD		0x10 /* Min protocol version 2.5 */
#define SAHARA_MEM_READ64_CMD		0x11 /* Min protocol version 2.5 */
#define SAHARA_READ_DATA64_CMD		0x12 /* Min protocol version 2.8 */
#define SAHARA_RESET_STATE_CMD		0x13 /* Min protocol version 2.9 */
#define SAHARA_WRITE_DATA_CMD		0x14 /* Min protocol version 3.0 */

#define SAHARA_PACKET_MAX_SIZE		0xffffU /* MHI_MAX_MTU */
#define SAHARA_TRANSFER_MAX_SIZE	0x80000
#define SAHARA_READ_MAX_SIZE		0xfff0U /* Avoid unaligned requests */
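/* With the sizes above: DIV_ROUND_UP(0x80000, 0xffff) == 9 TX buffers */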
#define SAHARA_NUM_TX_BUF		DIV_ROUND_UP(SAHARA_TRANSFER_MAX_SIZE,\
						     SAHARA_PACKET_MAX_SIZE)
#define SAHARA_IMAGE_ID_NONE		U32_MAX

#define SAHARA_VERSION			2
#define SAHARA_SUCCESS			0
#define SAHARA_TABLE_ENTRY_STR_LEN	20

#define SAHARA_MODE_IMAGE_TX_PENDING	0x0
#define SAHARA_MODE_IMAGE_TX_COMPLETE	0x1
#define SAHARA_MODE_MEMORY_DEBUG	0x2
#define SAHARA_MODE_COMMAND		0x3

#define SAHARA_HELLO_LENGTH		0x30
#define SAHARA_READ_DATA_LENGTH		0x14
#define SAHARA_END_OF_IMAGE_LENGTH	0x10
#define SAHARA_DONE_LENGTH		0x8
#define SAHARA_RESET_LENGTH		0x8
#define SAHARA_MEM_DEBUG64_LENGTH	0x18
#define SAHARA_MEM_READ64_LENGTH	0x18

struct sahara_packet {
	__le32 cmd;
	__le32 length;

	union {
		struct {
			__le32 version;
			__le32 version_compat;
			__le32 max_length;
			__le32 mode;
		} hello;
		struct {
			__le32 version;
			__le32 version_compat;
			__le32 status;
			__le32 mode;
		} hello_resp;
		struct {
			__le32 image;
			__le32 offset;
			__le32 length;
		} read_data;
		struct {
			__le32 image;
			__le32 status;
		} end_of_image;
		struct {
			__le64 table_address;
			__le64 table_length;
		} memory_debug64;
		struct {
			__le64 memory_address;
			__le64 memory_length;
		} memory_read64;
	};
};

struct sahara_debug_table_entry64 {
	__le64 type;
	__le64 address;
	__le64 length;
	char description[SAHARA_TABLE_ENTRY_STR_LEN];
	char filename[SAHARA_TABLE_ENTRY_STR_LEN];
};

struct sahara_dump_table_entry {
	u64 type;
	u64 address;
	u64 length;
	char description[SAHARA_TABLE_ENTRY_STR_LEN];
	char filename[SAHARA_TABLE_ENTRY_STR_LEN];
};

#define SAHARA_DUMP_V1_MAGIC 0x1234567890abcdef
#define SAHARA_DUMP_V1_VER   1
struct sahara_memory_dump_meta_v1 {
	u64 magic;
	u64 version;
	u64 dump_size;
	u64 table_size;
};

/*
 * Layout of crashdump provided to user via devcoredump
 * +------------------------------------------+
 * |         Crashdump Meta structure         |
 * | type: struct sahara_memory_dump_meta_v1  |
 * +------------------------------------------+
 * |             Crashdump Table              |
 * |          type: array of struct           |
 * |          sahara_dump_table_entry         |
 * |                                          |
 * |                                          |
 * +------------------------------------------+
 * |                Crashdump                 |
 * |                                          |
 * |                                          |
 * |                                          |
 * |                                          |
 * |                                          |
 * +------------------------------------------+
 *
 * First is the metadata header. Userspace can use the magic number to verify
 * the content type, and then check the version for the rest of the format.
 * New versions should keep the magic number location/value, and version
 * location, but increment the version value.
 *
 * For v1, the metadata lists the size of the entire dump (header + table +
 * dump) and the size of the table. Then the dump image table, which describes
 * the contents of the dump. Finally all the images are listed in order, with
 * no dead space in between. Userspace can use the sizes listed in the image
 * table to reconstruct the individual images.
 */
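/*
 * A minimal userspace parsing sketch for the v1 layout (illustrative only,
 * not part of this driver; assumes "dump" points at a complete devcoredump
 * blob read into memory):
 *
 *	struct sahara_memory_dump_meta_v1 *meta = (void *)dump;
 *	struct sahara_dump_table_entry *table = (void *)(meta + 1);
 *	u64 nents = meta->table_size / sizeof(struct sahara_debug_table_entry64);
 *	char *data = (char *)&table[nents];
 *
 *	for (u64 i = 0; i < nents; i++) {
 *		write table[i].length bytes from data to file table[i].filename;
 *		data += table[i].length;
 *	}
 */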

struct sahara_context {
	struct sahara_packet *tx[SAHARA_NUM_TX_BUF];
	struct sahara_packet *rx;
	struct work_struct fw_work;
	struct work_struct dump_work;
	struct work_struct read_data_work;
	struct mhi_device *mhi_dev;
	const char * const *image_table;
	u32 table_size;
	u32 active_image_id;
	const struct firmware *firmware;
	u64 dump_table_address;
	u64 dump_table_length;
	size_t rx_size;
	size_t rx_size_requested;
	void *mem_dump;
	size_t mem_dump_sz;
	struct sahara_dump_table_entry *dump_image;
	u64 dump_image_offset;
	void *mem_dump_freespace;
	u64 dump_images_left;
	u32 read_data_offset;
	u32 read_data_length;
	bool is_mem_dump_mode;
	bool non_streaming;
};

static const char * const aic100_image_table[] = {
	[1] = "qcom/aic100/fw1.bin",
	[2] = "qcom/aic100/fw2.bin",
	[4] = "qcom/aic100/fw4.bin",
	[5] = "qcom/aic100/fw5.bin",
	[6] = "qcom/aic100/fw6.bin",
	[8] = "qcom/aic100/fw8.bin",
	[9] = "qcom/aic100/fw9.bin",
	[10] = "qcom/aic100/fw10.bin",
};

static const char * const aic200_image_table[] = {
	[5] = "qcom/aic200/uefi.elf",
	[12] = "qcom/aic200/aic200-nsp.bin",
	[23] = "qcom/aic200/aop.mbn",
	[32] = "qcom/aic200/tz.mbn",
	[33] = "qcom/aic200/hypvm.mbn",
	[38] = "qcom/aic200/xbl_config.elf",
	[39] = "qcom/aic200/aic200_abl.elf",
	[40] = "qcom/aic200/apdp.mbn",
	[41] = "qcom/aic200/devcfg.mbn",
	[42] = "qcom/aic200/sec.elf",
	[43] = "qcom/aic200/aic200-hlos.elf",
	[49] = "qcom/aic200/shrm.elf",
	[50] = "qcom/aic200/cpucp.elf",
	[51] = "qcom/aic200/aop_devcfg.mbn",
	[54] = "qcom/aic200/qupv3fw.elf",
	[57] = "qcom/aic200/cpucp_dtbs.elf",
	[62] = "qcom/aic200/uefi_dtbs.elf",
	[63] = "qcom/aic200/xbl_ac_config.mbn",
	[64] = "qcom/aic200/tz_ac_config.mbn",
	[65] = "qcom/aic200/hyp_ac_config.mbn",
	[66] = "qcom/aic200/pdp.elf",
	[67] = "qcom/aic200/pdp_cdb.elf",
	[68] = "qcom/aic200/sdi.mbn",
	[69] = "qcom/aic200/dcd.mbn",
	[73] = "qcom/aic200/gearvm.mbn",
	[74] = "qcom/aic200/sti.bin",
	[76] = "qcom/aic200/tz_qti_config.mbn",
	[78] = "qcom/aic200/pvs.bin",
};

static bool is_streaming(struct sahara_context *context)
{
	return !context->non_streaming;
}

static int sahara_find_image(struct sahara_context *context, u32 image_id)
{
	int ret;

	if (image_id == context->active_image_id)
		return 0;

	if (context->active_image_id != SAHARA_IMAGE_ID_NONE) {
		dev_err(&context->mhi_dev->dev, "image id %d is not valid as %d is active\n",
			image_id, context->active_image_id);
		return -EINVAL;
	}

	if (image_id >= context->table_size || !context->image_table[image_id]) {
		dev_err(&context->mhi_dev->dev, "request for unknown image: %d\n", image_id);
		return -EINVAL;
	}

	/*
	 * This image might be optional. The device may continue without it.
	 * Only the device knows. Suppress error messages that could suggest a
	 * problem when we were actually able to continue.
	 */
	ret = firmware_request_nowarn(&context->firmware,
				      context->image_table[image_id],
				      &context->mhi_dev->dev);
	if (ret) {
		dev_dbg(&context->mhi_dev->dev, "request for image id %d / file %s failed %d\n",
			image_id, context->image_table[image_id], ret);
		return ret;
	}

	context->active_image_id = image_id;

	return 0;
}

static void sahara_release_image(struct sahara_context *context)
{
	if (context->active_image_id != SAHARA_IMAGE_ID_NONE)
		release_firmware(context->firmware);
	context->active_image_id = SAHARA_IMAGE_ID_NONE;
}

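/* Abort the current operation: clear local transfer state and send a RESET */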
static void sahara_send_reset(struct sahara_context *context)
{
	int ret;

	context->is_mem_dump_mode = false;
	context->read_data_offset = 0;
	context->read_data_length = 0;

	context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH);

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_RESET_LENGTH, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to send reset response %d\n", ret);
}

static void sahara_hello(struct sahara_context *context)
{
	int ret;

	dev_dbg(&context->mhi_dev->dev,
		"HELLO cmd received. length:%d version:%d version_compat:%d max_length:%d mode:%d\n",
		le32_to_cpu(context->rx->length),
		le32_to_cpu(context->rx->hello.version),
		le32_to_cpu(context->rx->hello.version_compat),
		le32_to_cpu(context->rx->hello.max_length),
		le32_to_cpu(context->rx->hello.mode));

	if (le32_to_cpu(context->rx->length) != SAHARA_HELLO_LENGTH) {
		dev_err(&context->mhi_dev->dev, "Malformed hello packet - length %d\n",
			le32_to_cpu(context->rx->length));
		return;
	}
	if (le32_to_cpu(context->rx->hello.version) != SAHARA_VERSION) {
		dev_err(&context->mhi_dev->dev, "Unsupported hello packet - version %d\n",
			le32_to_cpu(context->rx->hello.version));
		return;
	}

	if (le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_PENDING &&
	    le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE &&
	    le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_MEMORY_DEBUG) {
		dev_err(&context->mhi_dev->dev, "Unsupported hello packet - mode %d\n",
			le32_to_cpu(context->rx->hello.mode));
		return;
	}

	context->tx[0]->cmd = cpu_to_le32(SAHARA_HELLO_RESP_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_HELLO_LENGTH);
	context->tx[0]->hello_resp.version = cpu_to_le32(SAHARA_VERSION);
	context->tx[0]->hello_resp.version_compat = cpu_to_le32(SAHARA_VERSION);
	context->tx[0]->hello_resp.status = cpu_to_le32(SAHARA_SUCCESS);
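	/*
	 * hello_resp.mode aliases hello.mode (same offset within the packet
	 * union), so this echoes back the mode the device requested in HELLO.
	 */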
	context->tx[0]->hello_resp.mode = context->rx->hello_resp.mode;

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_HELLO_LENGTH, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to send hello response %d\n", ret);
}

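/*
 * Send one MTU-sized chunk of the active image. Non-streaming (SBL) devices
 * expect a single transport transfer per READ_DATA, so intermediate chunks
 * are linked with MHI_CHAIN and only the final chunk is marked MHI_EOT.
 */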
static int read_data_helper(struct sahara_context *context, int buf_index)
{
	enum mhi_flags mhi_flag;
	u32 pkt_data_len;
	int ret;

	pkt_data_len = min(context->read_data_length, SAHARA_PACKET_MAX_SIZE);

	memcpy(context->tx[buf_index],
	       &context->firmware->data[context->read_data_offset],
	       pkt_data_len);

	context->read_data_offset += pkt_data_len;
	context->read_data_length -= pkt_data_len;

	if (is_streaming(context) || !context->read_data_length)
		mhi_flag = MHI_EOT;
	else
		mhi_flag = MHI_CHAIN;

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE,
			    context->tx[buf_index], pkt_data_len, mhi_flag);
	if (ret) {
		dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n", ret);
		return ret;
	}

	return 0;
}

static void sahara_read_data(struct sahara_context *context)
{
	u32 image_id, data_offset, data_len;
	int ret;
	int i;

	dev_dbg(&context->mhi_dev->dev,
		"READ_DATA cmd received. length:%d image:%d offset:%d data_length:%d\n",
		le32_to_cpu(context->rx->length),
		le32_to_cpu(context->rx->read_data.image),
		le32_to_cpu(context->rx->read_data.offset),
		le32_to_cpu(context->rx->read_data.length));

	if (le32_to_cpu(context->rx->length) != SAHARA_READ_DATA_LENGTH) {
		dev_err(&context->mhi_dev->dev, "Malformed read_data packet - length %d\n",
			le32_to_cpu(context->rx->length));
		return;
	}

	image_id = le32_to_cpu(context->rx->read_data.image);
	data_offset = le32_to_cpu(context->rx->read_data.offset);
	data_len = le32_to_cpu(context->rx->read_data.length);

	ret = sahara_find_image(context, image_id);
	if (ret) {
		sahara_send_reset(context);
		return;
	}

	/*
	 * Image is released when the device is done with it via
	 * SAHARA_END_OF_IMAGE_CMD. sahara_send_reset() will either cause the
	 * device to retry the operation with a modification, or decide to be
	 * done with the image and trigger SAHARA_END_OF_IMAGE_CMD.
	 * release_image() is called from SAHARA_END_OF_IMAGE_CMD processing
	 * and is not needed here on error.
	 */

	if (context->non_streaming && data_len > SAHARA_TRANSFER_MAX_SIZE) {
		dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data len %d exceeds max xfer size %d\n",
			data_len, SAHARA_TRANSFER_MAX_SIZE);
		sahara_send_reset(context);
		return;
	}

	if (data_offset >= context->firmware->size) {
		dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data offset %d exceeds file size %zu\n",
			data_offset, context->firmware->size);
		sahara_send_reset(context);
		return;
	}

	if (size_add(data_offset, data_len) > context->firmware->size) {
		dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data offset %d and length %d exceeds file size %zu\n",
			data_offset, data_len, context->firmware->size);
		sahara_send_reset(context);
		return;
	}

	context->read_data_offset = data_offset;
	context->read_data_length = data_len;

	if (is_streaming(context)) {
		schedule_work(&context->read_data_work);
		return;
	}

	for (i = 0; i < SAHARA_NUM_TX_BUF && context->read_data_length; ++i) {
		ret = read_data_helper(context, i);
		if (ret)
			break;
	}
}

static void sahara_end_of_image(struct sahara_context *context)
{
	int ret;

	dev_dbg(&context->mhi_dev->dev,
		"END_OF_IMAGE cmd received. length:%d image:%d status:%d\n",
		le32_to_cpu(context->rx->length),
		le32_to_cpu(context->rx->end_of_image.image),
		le32_to_cpu(context->rx->end_of_image.status));

	if (le32_to_cpu(context->rx->length) != SAHARA_END_OF_IMAGE_LENGTH) {
		dev_err(&context->mhi_dev->dev, "Malformed end_of_image packet - length %d\n",
			le32_to_cpu(context->rx->length));
		return;
	}

	if (context->active_image_id != SAHARA_IMAGE_ID_NONE &&
	    le32_to_cpu(context->rx->end_of_image.image) != context->active_image_id) {
		dev_err(&context->mhi_dev->dev, "Malformed end_of_image packet - image %d is not the active image\n",
			le32_to_cpu(context->rx->end_of_image.image));
		return;
	}

	sahara_release_image(context);

	if (le32_to_cpu(context->rx->end_of_image.status))
		return;

	context->tx[0]->cmd = cpu_to_le32(SAHARA_DONE_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_DONE_LENGTH);

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_DONE_LENGTH, MHI_EOT);
	if (ret)
		dev_dbg(&context->mhi_dev->dev, "Unable to send done response %d\n", ret);
}

static void sahara_memory_debug64(struct sahara_context *context)
{
	int ret;

	dev_dbg(&context->mhi_dev->dev,
		"MEMORY DEBUG64 cmd received. length:%d table_address:%#llx table_length:%#llx\n",
		le32_to_cpu(context->rx->length),
		le64_to_cpu(context->rx->memory_debug64.table_address),
		le64_to_cpu(context->rx->memory_debug64.table_length));

	if (le32_to_cpu(context->rx->length) != SAHARA_MEM_DEBUG64_LENGTH) {
		dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - length %d\n",
			le32_to_cpu(context->rx->length));
		return;
	}

	context->dump_table_address = le64_to_cpu(context->rx->memory_debug64.table_address);
	context->dump_table_length = le64_to_cpu(context->rx->memory_debug64.table_length);

	if (context->dump_table_length % sizeof(struct sahara_debug_table_entry64) != 0 ||
	    !context->dump_table_length) {
		dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - table length %lld\n",
			context->dump_table_length);
		return;
	}

	/*
	 * From this point, the protocol flips. We make memory_read requests to
	 * the device, and the device responds with the raw data. If the device
	 * has an error, it will send an End of Image command. First we need to
	 * request the memory dump table so that we know where all the pieces
	 * of the dump are that we can consume.
	 */

	context->is_mem_dump_mode = true;

	/*
	 * Assume that the table is smaller than our MTU so that we can read it
	 * in one shot. The spec does not put an upper limit on the table, but
	 * no known device will exceed this.
	 */
	if (context->dump_table_length > SAHARA_PACKET_MAX_SIZE) {
		dev_err(&context->mhi_dev->dev, "Memory dump table length %lld exceeds supported size. Discarding dump\n",
			context->dump_table_length);
		sahara_send_reset(context);
		return;
	}

	context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
	context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_table_address);
	context->tx[0]->memory_read64.memory_length = cpu_to_le64(context->dump_table_length);

	context->rx_size_requested = context->dump_table_length;

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_MEM_READ64_LENGTH, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to send read for dump table %d\n", ret);
}

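/* Dispatch one received packet in firmware-transfer mode, then requeue rx */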
static void sahara_processing(struct work_struct *work)
{
	struct sahara_context *context = container_of(work, struct sahara_context, fw_work);
	int ret;

	switch (le32_to_cpu(context->rx->cmd)) {
	case SAHARA_HELLO_CMD:
		sahara_hello(context);
		break;
	case SAHARA_READ_DATA_CMD:
		sahara_read_data(context);
		break;
	case SAHARA_END_OF_IMAGE_CMD:
		sahara_end_of_image(context);
		break;
	case SAHARA_DONE_RESP_CMD:
		/* Intentionally do nothing as we don't need to exit an app */
		break;
	case SAHARA_RESET_RESP_CMD:
		/* Intentionally do nothing as we don't need to exit an app */
		break;
	case SAHARA_MEM_DEBUG64_CMD:
		sahara_memory_debug64(context);
		break;
	default:
		dev_err(&context->mhi_dev->dev, "Unknown command %d\n",
			le32_to_cpu(context->rx->cmd));
		break;
	}

	ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx,
			    SAHARA_PACKET_MAX_SIZE, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);
}

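/*
 * Process the debug table read from the device: size and allocate the
 * devcoredump buffer, populate its metadata and table for userspace, and
 * request the first chunk of the first image.
 */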
static void sahara_parse_dump_table(struct sahara_context *context)
{
	struct sahara_dump_table_entry *image_out_table;
	struct sahara_debug_table_entry64 *dev_table;
	struct sahara_memory_dump_meta_v1 *dump_meta;
	u64 table_nents;
	u64 dump_length;
	u64 mul_bytes;
	int ret;
	u64 i;

	table_nents = context->dump_table_length / sizeof(*dev_table);
	context->dump_images_left = table_nents;
	dump_length = 0;

	dev_table = (struct sahara_debug_table_entry64 *)(context->rx);
	for (i = 0; i < table_nents; ++i) {
		/* Do not trust the device, ensure the strings are terminated */
		dev_table[i].description[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
		dev_table[i].filename[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;

		if (check_add_overflow(dump_length,
				       le64_to_cpu(dev_table[i].length),
				       &dump_length)) {
			/* Discard the dump */
			sahara_send_reset(context);
			return;
		}

		dev_dbg(&context->mhi_dev->dev,
			"Memory dump table entry %lld type: %lld address: %#llx length: %#llx description: \"%s\" filename \"%s\"\n",
			i,
			le64_to_cpu(dev_table[i].type),
			le64_to_cpu(dev_table[i].address),
			le64_to_cpu(dev_table[i].length),
			dev_table[i].description,
			dev_table[i].filename);
	}

	if (check_add_overflow(dump_length, (u64)sizeof(*dump_meta), &dump_length)) {
		/* Discard the dump */
		sahara_send_reset(context);
		return;
	}
	if (check_mul_overflow((u64)sizeof(*image_out_table), table_nents, &mul_bytes)) {
		/* Discard the dump */
		sahara_send_reset(context);
		return;
	}
	if (check_add_overflow(dump_length, mul_bytes, &dump_length)) {
		/* Discard the dump */
		sahara_send_reset(context);
		return;
	}

	context->mem_dump_sz = dump_length;
	context->mem_dump = vzalloc(dump_length);
	if (!context->mem_dump) {
		/* Discard the dump */
		sahara_send_reset(context);
		return;
	}

	/* Populate the dump metadata and table for userspace */
	dump_meta = context->mem_dump;
	dump_meta->magic = SAHARA_DUMP_V1_MAGIC;
	dump_meta->version = SAHARA_DUMP_V1_VER;
	dump_meta->dump_size = dump_length;
	dump_meta->table_size = context->dump_table_length;

	image_out_table = context->mem_dump + sizeof(*dump_meta);
	for (i = 0; i < table_nents; ++i) {
		image_out_table[i].type = le64_to_cpu(dev_table[i].type);
		image_out_table[i].address = le64_to_cpu(dev_table[i].address);
		image_out_table[i].length = le64_to_cpu(dev_table[i].length);
		strscpy(image_out_table[i].description, dev_table[i].description,
			SAHARA_TABLE_ENTRY_STR_LEN);
		strscpy(image_out_table[i].filename,
			dev_table[i].filename,
			SAHARA_TABLE_ENTRY_STR_LEN);
	}

	context->mem_dump_freespace = &image_out_table[i];

	/* Done parsing the table, switch to image dump mode */
	context->dump_table_length = 0;

	/* Request the first chunk of the first image */
	context->dump_image = &image_out_table[0];
	dump_length = min_t(u64, context->dump_image->length, SAHARA_READ_MAX_SIZE);
	/* Avoid requesting EOI sized data so that we can identify errors */
	if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
		dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;

	context->dump_image_offset = dump_length;

	context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
	context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_image->address);
	context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length);

	context->rx_size_requested = dump_length;

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_MEM_READ64_LENGTH, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to send read for dump content %d\n", ret);
}

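/*
 * Store one chunk of raw dump data and request the next, advancing to the
 * next image as needed. The finished dump is handed to devcoredump.
 */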
static void sahara_parse_dump_image(struct sahara_context *context)
{
	u64 dump_length;
	int ret;

	memcpy(context->mem_dump_freespace, context->rx, context->rx_size);
	context->mem_dump_freespace += context->rx_size;

	if (context->dump_image_offset >= context->dump_image->length) {
		/* Need to move to next image */
		context->dump_image++;
		context->dump_images_left--;
		context->dump_image_offset = 0;

		if (!context->dump_images_left) {
			/* Dump done */
			dev_coredumpv(context->mhi_dev->mhi_cntrl->cntrl_dev,
				      context->mem_dump,
				      context->mem_dump_sz,
				      GFP_KERNEL);
			context->mem_dump = NULL;
			sahara_send_reset(context);
			return;
		}
	}

	/* Get next image chunk */
	dump_length = context->dump_image->length - context->dump_image_offset;
	dump_length = min_t(u64, dump_length, SAHARA_READ_MAX_SIZE);
	/* Avoid requesting EOI sized data so that we can identify errors */
	if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
		dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;

	context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
	context->tx[0]->memory_read64.memory_address =
		cpu_to_le64(context->dump_image->address + context->dump_image_offset);
	context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length);

	context->dump_image_offset += dump_length;
	context->rx_size_requested = dump_length;

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_MEM_READ64_LENGTH, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev,
			"Unable to send read for dump content %d\n", ret);
}

static void sahara_dump_processing(struct work_struct *work)
{
	struct sahara_context *context = container_of(work, struct sahara_context, dump_work);
	int ret;

	/*
	 * We should get the expected raw data, but if the device has an error
	 * it is supposed to send EOI with an error code.
	 */
	if (context->rx_size != context->rx_size_requested &&
	    context->rx_size != SAHARA_END_OF_IMAGE_LENGTH) {
		dev_err(&context->mhi_dev->dev,
			"Unexpected response to read_data. Expected size: %#zx got: %#zx\n",
			context->rx_size_requested,
			context->rx_size);
		goto error;
	}

	if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH &&
	    le32_to_cpu(context->rx->cmd) == SAHARA_END_OF_IMAGE_CMD) {
		dev_err(&context->mhi_dev->dev,
			"Unexpected EOI response to read_data. Status: %d\n",
			le32_to_cpu(context->rx->end_of_image.status));
		goto error;
	}

	if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH &&
	    le32_to_cpu(context->rx->cmd) != SAHARA_END_OF_IMAGE_CMD) {
		dev_err(&context->mhi_dev->dev,
			"Invalid EOI response to read_data. CMD: %d\n",
			le32_to_cpu(context->rx->cmd));
		goto error;
	}

	/*
	 * Need to know if we received the dump table, or part of a dump image.
	 * Since we get raw data, we cannot tell from the data itself. Instead,
	 * we use the stored dump_table_length, which we zero after we read and
	 * process the entire table.
	 */
	if (context->dump_table_length)
		sahara_parse_dump_table(context);
	else
		sahara_parse_dump_image(context);

	ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx,
			    SAHARA_PACKET_MAX_SIZE, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);

	return;

error:
	vfree(context->mem_dump);
	context->mem_dump = NULL;
	sahara_send_reset(context);
}

static void sahara_read_data_processing(struct work_struct *work)
{
	struct sahara_context *context = container_of(work, struct sahara_context, read_data_work);

	read_data_helper(context, 0);
}

static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
	struct sahara_context *context;
	int ret;
	int i;

	context = devm_kzalloc(&mhi_dev->dev, sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->rx = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL);
	if (!context->rx)
		return -ENOMEM;

	if (!strcmp(mhi_dev->mhi_cntrl->name, "AIC200")) {
		context->image_table = aic200_image_table;
		context->table_size = ARRAY_SIZE(aic200_image_table);
	} else {
		context->image_table = aic100_image_table;
		context->table_size = ARRAY_SIZE(aic100_image_table);
		context->non_streaming = true;
	}

	/*
	 * There are two firmware implementations for READ_DATA handling.
	 * The older "SBL" implementation defines a Sahara transfer size, and
	 * expects that the response is a single transport transfer. If the
	 * FW wants to transfer a file that is larger than the transfer size,
	 * the FW will issue multiple READ_DATA commands. For this
	 * implementation, we need to allocate enough buffers to contain the
	 * entire Sahara transfer size.
	 *
	 * The newer "XBL" implementation does not define a maximum transfer
	 * size and instead expects the data to be streamed over using the
	 * transport level MTU. The FW will issue a single READ_DATA command
	 * of whatever size, and consume multiple transport level transfers
	 * until the expected amount of data is consumed. For this
	 * implementation we only need a single buffer of the transport MTU
	 * but we'll need to be able to use it multiple times for a single
	 * READ_DATA request.
	 *
	 * AIC100 is the SBL implementation and defines SAHARA_TRANSFER_MAX_SIZE
	 * and we need 9x SAHARA_PACKET_MAX_SIZE to cover that. We can use
	 * MHI_CHAIN to link multiple buffers into a single transfer but the
	 * remote side will not consume the buffers until it sees an EOT, thus
	 * we need to allocate enough buffers to put in the tx fifo to cover an
	 * entire READ_DATA request of the max size.
	 *
	 * AIC200 is the XBL implementation, and so a single buffer will work.
	 */
	for (i = 0; i < SAHARA_NUM_TX_BUF; ++i) {
		context->tx[i] = devm_kzalloc(&mhi_dev->dev,
					      SAHARA_PACKET_MAX_SIZE,
					      GFP_KERNEL);
		if (!context->tx[i])
			return -ENOMEM;
		if (is_streaming(context))
			break;
	}

	context->mhi_dev = mhi_dev;
	INIT_WORK(&context->fw_work, sahara_processing);
	INIT_WORK(&context->dump_work, sahara_dump_processing);
	INIT_WORK(&context->read_data_work, sahara_read_data_processing);

	context->active_image_id = SAHARA_IMAGE_ID_NONE;
	dev_set_drvdata(&mhi_dev->dev, context);

	ret = mhi_prepare_for_transfer(mhi_dev);
	if (ret)
		return ret;

	ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, context->rx, SAHARA_PACKET_MAX_SIZE, MHI_EOT);
	if (ret) {
		mhi_unprepare_from_transfer(mhi_dev);
		return ret;
	}

	return 0;
}

static void sahara_mhi_remove(struct mhi_device *mhi_dev)
{
	struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);

	cancel_work_sync(&context->fw_work);
	cancel_work_sync(&context->dump_work);
	vfree(context->mem_dump);
	sahara_release_image(context);
	mhi_unprepare_from_transfer(mhi_dev);
}

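/*
 * TX (uplink) completion: streaming (XBL) devices reuse the single tx buffer,
 * so queue the next chunk of an in-progress READ_DATA once this one is sent.
 */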
static void sahara_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
	struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);

	if (!mhi_result->transaction_status && context->read_data_length && is_streaming(context))
		schedule_work(&context->read_data_work);
}

static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
	struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);

	if (!mhi_result->transaction_status) {
		context->rx_size = mhi_result->bytes_xferd;
		if (context->is_mem_dump_mode)
			schedule_work(&context->dump_work);
		else
			schedule_work(&context->fw_work);
	}
}

static const struct mhi_device_id sahara_mhi_match_table[] = {
	{ .chan = "QAIC_SAHARA", },
	{},
};

static struct mhi_driver sahara_mhi_driver = {
	.id_table = sahara_mhi_match_table,
	.remove = sahara_mhi_remove,
	.probe = sahara_mhi_probe,
	.ul_xfer_cb = sahara_mhi_ul_xfer_cb,
	.dl_xfer_cb = sahara_mhi_dl_xfer_cb,
	.driver = {
		.name = "sahara",
	},
};

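/*
 * Registration hooks for the parent module. A hypothetical caller (sketch
 * only; the init/exit function names below are assumptions, not taken from
 * this file):
 *
 *	static int __init qaic_init(void)
 *	{
 *		return sahara_register();
 *	}
 *
 *	static void __exit qaic_exit(void)
 *	{
 *		sahara_unregister();
 *	}
 */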
int sahara_register(void)
{
	return mhi_driver_register(&sahara_mhi_driver);
}

void sahara_unregister(void)
{
	mhi_driver_unregister(&sahara_mhi_driver);
}