GitHub Repository: torvalds/linux
Path: blob/master/drivers/bus/mhi/host/boot.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

/* Setup RDDM vector table for RDDM transfer and program RXVEC */
int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
		     struct image_info *img_info)
{
	struct mhi_buf *mhi_buf = img_info->mhi_buf;
	struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
	void __iomem *base = mhi_cntrl->bhie;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 sequence_id;
	unsigned int i;
	int ret;

	for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
		bhi_vec->dma_addr = cpu_to_le64(mhi_buf->dma_addr);
		bhi_vec->size = cpu_to_le64(mhi_buf->len);
	}

	dev_dbg(dev, "BHIe programming for RDDM\n");

	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
		      upper_32_bits(mhi_buf->dma_addr));

	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
		      lower_32_bits(mhi_buf->dma_addr));

	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
	sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK);

	ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
				  BHIE_RXVECDB_SEQNUM_BMSK, sequence_id);
	if (ret) {
		dev_err(dev, "Failed to write sequence ID for BHIE_RXVECDB\n");
		return ret;
	}

	dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n",
		&mhi_buf->dma_addr, mhi_buf->len, sequence_id);

	return 0;
}
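
/*
 * Illustrative sketch (not part of this file): how the MHI core typically
 * pairs mhi_alloc_bhie_table() with mhi_rddm_prepare() when a controller
 * advertises an RDDM dump size. The rddm_image and rddm_size members exist on
 * struct mhi_controller; the surrounding power-up code is assumed to live
 * outside boot.c.
 *
 *	if (mhi_cntrl->rddm_size) {
 *		ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
 *					   mhi_cntrl->rddm_size);
 *		if (ret)
 *			return ret;
 *
 *		// Program RXVEC so the device can DMA the dump on demand
 *		ret = mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
 *		if (ret) {
 *			mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
 *			return ret;
 *		}
 *	}
 */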

/* Collect RDDM buffer during kernel panic */
static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
{
	int ret;
	u32 rx_status;
	enum mhi_ee_type ee;
	const u32 delayus = 2000;
	u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
	const u32 rddm_timeout_us = 200000;
	int rddm_retry = rddm_timeout_us / delayus;
	void __iomem *base = mhi_cntrl->bhie;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state),
		TO_MHI_EXEC_STR(mhi_cntrl->ee));

	/*
	 * This should only be executing during a kernel panic, we expect all
	 * other cores to shutdown while we're collecting RDDM buffer. After
	 * returning from this function, we expect the device to reset.
	 *
	 * Normally, we read/write pm_state only after grabbing the
	 * pm_lock, since we're in a panic, skipping it. Also there is no
	 * guarantee that this state change would take effect since
	 * we're setting it w/o grabbing pm_lock
	 */
	mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
	/* update should take the effect immediately */
	smp_wmb();

	/*
	 * Make sure device is not already in RDDM. In case the device asserts
	 * and a kernel panic follows, device will already be in RDDM.
	 * Do not trigger SYS ERR again and proceed with waiting for
	 * image download completion.
	 */
	ee = mhi_get_exec_env(mhi_cntrl);
	if (ee == MHI_EE_MAX)
		goto error_exit_rddm;

	if (ee != MHI_EE_RDDM) {
		dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

		dev_dbg(dev, "Waiting for device to enter RDDM\n");
		while (rddm_retry--) {
			ee = mhi_get_exec_env(mhi_cntrl);
			if (ee == MHI_EE_RDDM)
				break;

			udelay(delayus);
		}

		if (rddm_retry <= 0) {
			/* Hardware reset so force device to enter RDDM */
			dev_dbg(dev,
				"Did not enter RDDM, do a host req reset\n");
			mhi_soc_reset(mhi_cntrl);
			udelay(delayus);
		}

		ee = mhi_get_exec_env(mhi_cntrl);
	}

	dev_dbg(dev,
		"Waiting for RDDM image download via BHIe, current EE:%s\n",
		TO_MHI_EXEC_STR(ee));

	while (retry--) {
		ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
					 BHIE_RXVECSTATUS_STATUS_BMSK, &rx_status);
		if (ret)
			return -EIO;

		if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL)
			return 0;

		udelay(delayus);
	}

	ee = mhi_get_exec_env(mhi_cntrl);
	ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);

	dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status);

error_exit_rddm:
	dev_err(dev, "RDDM transfer failed. Current EE: %s\n",
		TO_MHI_EXEC_STR(ee));

	return -EIO;
}
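
/*
 * Worked example of the polling budget above (derived from the constants in
 * __mhi_download_rddm_in_panic, not additional behaviour): with delayus = 2000
 * and rddm_timeout_us = 200000, the execution-environment poll loop runs at
 * most 200000 / 2000 = 100 iterations (about 200 ms of busy-wait). The
 * RXVECSTATUS poll budget scales with the controller timeout, e.g. an assumed
 * timeout_ms = 8000 gives (8000 * 1000) / 2000 = 4000 iterations of 2 ms each.
 */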

/* Download RDDM image from device */
int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic)
{
	void __iomem *base = mhi_cntrl->bhie;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 rx_status;

	if (in_panic)
		return __mhi_download_rddm_in_panic(mhi_cntrl);

	dev_dbg(dev, "Waiting for RDDM image download via BHIe\n");

	/* Wait for the image download to complete */
	wait_event_timeout(mhi_cntrl->state_event,
			   mhi_read_reg_field(mhi_cntrl, base,
					      BHIE_RXVECSTATUS_OFFS,
					      BHIE_RXVECSTATUS_STATUS_BMSK,
					      &rx_status) || rx_status,
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
}
EXPORT_SYMBOL_GPL(mhi_download_rddm_image);
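
/*
 * Illustrative sketch (assumption, not upstream code): a controller driver
 * that registered a panic notifier might collect the dump like this. The
 * notifier wiring and the example_dev lookup are hypothetical; only
 * mhi_download_rddm_image() itself is defined in this file.
 *
 *	static int example_panic_handler(struct notifier_block *nb,
 *					 unsigned long action, void *data)
 *	{
 *		struct example_dev *edev = container_of(nb, struct example_dev,
 *							panic_nb);
 *
 *		// true selects the polling __mhi_download_rddm_in_panic() path
 *		mhi_download_rddm_image(edev->mhi_cntrl, true);
 *		return NOTIFY_DONE;
 *	}
 */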

static void mhi_fw_load_error_dump(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
	void __iomem *base = mhi_cntrl->bhi;
	int ret, i;
	u32 val;
	struct {
		char *name;
		u32 offset;
	} error_reg[] = {
		{ "ERROR_CODE", BHI_ERRCODE },
		{ "ERROR_DBG1", BHI_ERRDBG1 },
		{ "ERROR_DBG2", BHI_ERRDBG2 },
		{ "ERROR_DBG3", BHI_ERRDBG3 },
		{ NULL },
	};

	read_lock_bh(pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		for (i = 0; error_reg[i].name; i++) {
			ret = mhi_read_reg(mhi_cntrl, base, error_reg[i].offset, &val);
			if (ret)
				break;
			dev_err(dev, "Reg: %s value: 0x%x\n", error_reg[i].name, val);
		}
	}
	read_unlock_bh(pm_lock);
}

static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
			    const struct mhi_buf *mhi_buf)
{
	void __iomem *base = mhi_cntrl->bhie;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
	u32 tx_status, sequence_id;
	int ret;

	read_lock_bh(pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		read_unlock_bh(pm_lock);
		return -EIO;
	}

	sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK);
	dev_dbg(dev, "Starting image download via BHIe. Sequence ID: %u\n",
		sequence_id);
	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
		      upper_32_bits(mhi_buf->dma_addr));

	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
		      lower_32_bits(mhi_buf->dma_addr));

	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);

	ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
				  BHIE_TXVECDB_SEQNUM_BMSK, sequence_id);
	read_unlock_bh(pm_lock);

	if (ret)
		return ret;

	/* Wait for the image download to complete */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
				 mhi_read_reg_field(mhi_cntrl, base,
						    BHIE_TXVECSTATUS_OFFS,
						    BHIE_TXVECSTATUS_STATUS_BMSK,
						    &tx_status) || tx_status,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
	    tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL)
		return -EIO;

	return (!ret) ? -ETIMEDOUT : 0;
}

static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
			   const struct mhi_buf *mhi_buf)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
	void __iomem *base = mhi_cntrl->bhi;
	u32 tx_status, session_id;
	int ret;

	read_lock_bh(pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		read_unlock_bh(pm_lock);
		goto invalid_pm_state;
	}

	session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK);
	dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n",
		session_id);
	mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
	mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, upper_32_bits(mhi_buf->dma_addr));
	mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, lower_32_bits(mhi_buf->dma_addr));
	mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, mhi_buf->len);
	mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
	read_unlock_bh(pm_lock);

	/* Wait for the image download to complete */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
				 mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
						    BHI_STATUS_MASK, &tx_status) || tx_status,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		goto invalid_pm_state;

	if (tx_status == BHI_STATUS_ERROR) {
		dev_err(dev, "Image transfer failed\n");
		mhi_fw_load_error_dump(mhi_cntrl);
		goto invalid_pm_state;
	}

	return (!ret) ? -ETIMEDOUT : 0;

invalid_pm_state:

	return -EIO;
}

static void mhi_free_bhi_buffer(struct mhi_controller *mhi_cntrl,
				struct image_info *image_info)
{
	struct mhi_buf *mhi_buf = image_info->mhi_buf;

	dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, mhi_buf->buf, mhi_buf->dma_addr);
	kfree(image_info->mhi_buf);
	kfree(image_info);
}

void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info *image_info)
{
	int i;
	struct mhi_buf *mhi_buf = image_info->mhi_buf;

	for (i = 0; i < image_info->entries; i++, mhi_buf++)
		dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
				  mhi_buf->buf, mhi_buf->dma_addr);

	kfree(image_info->mhi_buf);
	kfree(image_info);
}

static int mhi_alloc_bhi_buffer(struct mhi_controller *mhi_cntrl,
				struct image_info **image_info,
				size_t alloc_size)
{
	struct image_info *img_info;
	struct mhi_buf *mhi_buf;

	img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
	if (!img_info)
		return -ENOMEM;

	/* Allocate memory for entry */
	img_info->mhi_buf = kzalloc(sizeof(*img_info->mhi_buf), GFP_KERNEL);
	if (!img_info->mhi_buf)
		goto error_alloc_mhi_buf;

	/* Allocate and populate vector table */
	mhi_buf = img_info->mhi_buf;

	mhi_buf->len = alloc_size;
	mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
					  &mhi_buf->dma_addr, GFP_KERNEL);
	if (!mhi_buf->buf)
		goto error_alloc_segment;

	img_info->bhi_vec = NULL;
	img_info->entries = 1;
	*image_info = img_info;

	return 0;

error_alloc_segment:
	kfree(mhi_buf);
error_alloc_mhi_buf:
	kfree(img_info);

	return -ENOMEM;
}

int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info **image_info,
			 size_t alloc_size)
{
	size_t seg_size = mhi_cntrl->seg_len;
	int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
	int i;
	struct image_info *img_info;
	struct mhi_buf *mhi_buf;

	img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
	if (!img_info)
		return -ENOMEM;

	/* Allocate memory for entries */
	img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
				    GFP_KERNEL);
	if (!img_info->mhi_buf)
		goto error_alloc_mhi_buf;

	/* Allocate and populate vector table */
	mhi_buf = img_info->mhi_buf;
	for (i = 0; i < segments; i++, mhi_buf++) {
		size_t vec_size = seg_size;

		/* Vector table is the last entry */
		if (i == segments - 1)
			vec_size = sizeof(struct bhi_vec_entry) * i;

		mhi_buf->len = vec_size;
		mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						  vec_size, &mhi_buf->dma_addr,
						  GFP_KERNEL);
		if (!mhi_buf->buf)
			goto error_alloc_segment;
	}

	img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
	img_info->entries = segments;
	*image_info = img_info;

	return 0;

error_alloc_segment:
	for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
		dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
				  mhi_buf->buf, mhi_buf->dma_addr);
	kfree(img_info->mhi_buf);

error_alloc_mhi_buf:
	kfree(img_info);

	return -ENOMEM;
}
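
/*
 * Layout note with a worked example (the numbers are illustrative, the
 * arithmetic comes straight from mhi_alloc_bhie_table): for
 * alloc_size = 600 KiB and seg_len = 512 KiB,
 * segments = DIV_ROUND_UP(600K, 512K) + 1 = 3. Entries 0 and 1 hold image
 * data; entry 2 holds the vector table itself, sized for
 * (segments - 1) = 2 struct bhi_vec_entry records describing the data
 * segments, and its buffer is what later gets programmed into TXVEC/RXVEC.
 */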

static void mhi_firmware_copy_bhie(struct mhi_controller *mhi_cntrl,
				   const u8 *buf, size_t remainder,
				   struct image_info *img_info)
{
	size_t to_cpy;
	struct mhi_buf *mhi_buf = img_info->mhi_buf;
	struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;

	while (remainder) {
		to_cpy = min(remainder, mhi_buf->len);
		memcpy(mhi_buf->buf, buf, to_cpy);
		bhi_vec->dma_addr = cpu_to_le64(mhi_buf->dma_addr);
		bhi_vec->size = cpu_to_le64(to_cpy);

		buf += to_cpy;
		remainder -= to_cpy;
		bhi_vec++;
		mhi_buf++;
	}
}

static enum mhi_fw_load_type mhi_fw_load_type_get(const struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->fbc_download) {
		return MHI_FW_LOAD_FBC;
	} else {
		if (mhi_cntrl->seg_len)
			return MHI_FW_LOAD_BHIE;
		else
			return MHI_FW_LOAD_BHI;
	}
}
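
/*
 * Selection summary for mhi_fw_load_type_get(), restating the logic above:
 *
 *	fbc_download set	 -> MHI_FW_LOAD_FBC: SBL image now over BHI,
 *				    AMSS later over BHIe via mhi_download_amss_image()
 *	!fbc_download, seg_len	 -> MHI_FW_LOAD_BHIE: single image, segmented transfer
 *	!fbc_download, !seg_len	 -> MHI_FW_LOAD_BHI: single contiguous image
 */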

static int mhi_load_image_bhi(struct mhi_controller *mhi_cntrl, const u8 *fw_data, size_t size)
{
	struct image_info *image;
	int ret;

	ret = mhi_alloc_bhi_buffer(mhi_cntrl, &image, size);
	if (ret)
		return ret;

	/* Load the firmware into BHI vec table */
	memcpy(image->mhi_buf->buf, fw_data, size);

	ret = mhi_fw_load_bhi(mhi_cntrl, &image->mhi_buf[image->entries - 1]);
	mhi_free_bhi_buffer(mhi_cntrl, image);

	return ret;
}

static int mhi_load_image_bhie(struct mhi_controller *mhi_cntrl, const u8 *fw_data, size_t size)
{
	struct image_info *image;
	int ret;

	ret = mhi_alloc_bhie_table(mhi_cntrl, &image, size);
	if (ret)
		return ret;

	mhi_firmware_copy_bhie(mhi_cntrl, fw_data, size, image);

	ret = mhi_fw_load_bhie(mhi_cntrl, &image->mhi_buf[image->entries - 1]);
	mhi_free_bhie_table(mhi_cntrl, image);

	return ret;
}

void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
	const struct firmware *firmware = NULL;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_fw_load_type fw_load_type;
	enum mhi_pm_state new_state;
	const char *fw_name;
	const u8 *fw_data;
	size_t size, fw_sz;
	int ret;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device MHI is not in valid state\n");
		return;
	}

	/* save hardware info from BHI */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_SERIALNU,
			   &mhi_cntrl->serial_number);
	if (ret)
		dev_err(dev, "Could not capture serial number via BHI\n");

	/* wait for ready on pass through or any other execution environment */
	if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee))
		goto fw_load_ready_state;

	fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
		mhi_cntrl->edl_image : mhi_cntrl->fw_image;

	/* check if the driver has already provided the firmware data */
	if (!fw_name && mhi_cntrl->fbc_download &&
	    mhi_cntrl->fw_data && mhi_cntrl->fw_sz) {
		if (!mhi_cntrl->sbl_size) {
			dev_err(dev, "fw_data provided but no sbl_size\n");
			goto error_fw_load;
		}

		size = mhi_cntrl->sbl_size;
		fw_data = mhi_cntrl->fw_data;
		fw_sz = mhi_cntrl->fw_sz;
		goto skip_req_fw;
	}

	if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
						     !mhi_cntrl->seg_len))) {
		dev_err(dev,
			"No firmware image defined or !sbl_size || !seg_len\n");
		goto error_fw_load;
	}

	ret = request_firmware(&firmware, fw_name, dev);
	if (ret) {
		dev_err(dev, "Error loading firmware: %d\n", ret);
		goto error_fw_load;
	}

	size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;

	/* SBL size provided is maximum size, not necessarily the image size */
	if (size > firmware->size)
		size = firmware->size;

	fw_data = firmware->data;
	fw_sz = firmware->size;

skip_req_fw:
	fw_load_type = mhi_fw_load_type_get(mhi_cntrl);
	if (fw_load_type == MHI_FW_LOAD_BHIE)
		ret = mhi_load_image_bhie(mhi_cntrl, fw_data, size);
	else
		ret = mhi_load_image_bhi(mhi_cntrl, fw_data, size);

	/* Error or in EDL mode, we're done */
	if (ret) {
		dev_err(dev, "MHI did not load image over BHI%s, ret: %d\n",
			fw_load_type == MHI_FW_LOAD_BHIE ? "e" : "",
			ret);
		release_firmware(firmware);
		goto error_fw_load;
	}

	/* Wait for ready since EDL image was loaded */
	if (fw_name && fw_name == mhi_cntrl->edl_image) {
		release_firmware(firmware);
		goto fw_load_ready_state;
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_RESET;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/*
	 * If we're doing fbc, populate vector tables while
	 * device transitioning into MHI READY state
	 */
	if (fw_load_type == MHI_FW_LOAD_FBC) {
		ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, fw_sz);
		if (ret) {
			release_firmware(firmware);
			goto error_fw_load;
		}

		/* Load the firmware into BHIE vec table */
		mhi_firmware_copy_bhie(mhi_cntrl, fw_data, fw_sz, mhi_cntrl->fbc_image);
	}

	release_firmware(firmware);

fw_load_ready_state:
	/* Transitioning into MHI RESET->READY state */
	ret = mhi_ready_state_transition(mhi_cntrl);
	if (ret) {
		dev_err(dev, "MHI did not enter READY state\n");
		goto error_ready_state;
	}

	dev_info(dev, "Wait for device to enter SBL or Mission mode\n");
	return;

error_ready_state:
	if (mhi_cntrl->fbc_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

error_fw_load:
	write_lock_irq(&mhi_cntrl->pm_lock);
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (new_state == MHI_PM_FW_DL_ERR)
		wake_up_all(&mhi_cntrl->state_event);
}

int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
{
	struct image_info *image_info = mhi_cntrl->fbc_image;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state new_state;
	int ret;

	if (!image_info)
		return -EIO;

	ret = mhi_fw_load_bhie(mhi_cntrl,
			       /* Vector table is the last entry */
			       &image_info->mhi_buf[image_info->entries - 1]);
	if (ret) {
		dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
		write_lock_irq(&mhi_cntrl->pm_lock);
		new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (new_state == MHI_PM_FW_DL_ERR)
			wake_up_all(&mhi_cntrl->state_event);
	}

	return ret;
}
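
/*
 * Illustrative sketch (assumption about callers outside this file): the MHI
 * state machine is expected to call mhi_download_amss_image() once the device
 * has reached SBL and the fbc_image vector table populated by
 * mhi_fw_load_handler() is in place, e.g.:
 *
 *	if (mhi_cntrl->fbc_image && current_ee == MHI_EE_SBL)
 *		ret = mhi_download_amss_image(mhi_cntrl);
 *
 * The guard and the current_ee variable are hypothetical; only
 * mhi_download_amss_image() and the fbc_image member come from the MHI core.
 */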