GitHub Repository: torvalds/linux
Path: blob/master/drivers/bus/mhi/host/pm.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"
#include "trace.h"

/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
 * transition to a new state only if we're allowed to.
 *
 * Priority increases as we go down. For instance, from any state in L0, the
 * transition can be made to states in L1, L2 and L3. A notable exception to
 * this rule is state DISABLE. From DISABLE state we can only transition to
 * POR state. Also, while in L2 state, user cannot jump back to previous
 * L1 or L0 states.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR <--> POR
 *     POR -> M0 -> M2 --> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS
 *     SYS_ERR_PROCESS -> SYS_ERR_FAIL
 *     SYS_ERR_FAIL -> SYS_ERR_DETECT
 *     SYS_ERR_PROCESS --> POR
 * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
 *     SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> DISABLE
 */
static const struct mhi_pm_transitions dev_state_transitions[] = {
	/* L0 States */
	{
		MHI_PM_DISABLE,
		MHI_PM_POR
	},
	{
		MHI_PM_POR,
		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M0,
		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M2,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_ENTER,
		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3,
		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_EXIT,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_FW_DL_ERR,
		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L1 States */
	{
		MHI_PM_SYS_ERR_DETECT,
		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_PROCESS,
		MHI_PM_POR | MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_FAIL,
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L2 States */
	{
		MHI_PM_SHUTDOWN_PROCESS,
		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L3 States */
	{
		MHI_PM_LD_ERR_FATAL_DETECT,
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
	},
};
enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
						   enum mhi_pm_state state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
		return cur_state;

	if (unlikely(dev_state_transitions[index].from_state != cur_state))
		return cur_state;

	if (unlikely(!(dev_state_transitions[index].to_states & state)))
		return cur_state;

	trace_mhi_tryset_pm_state(mhi_cntrl, state);
	mhi_cntrl->pm_state = state;
	return mhi_cntrl->pm_state;
}
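/*
 * Editorial note: the sketch below (not part of the driver) shows the
 * calling pattern mhi_tryset_pm_state() expects. The caller holds pm_lock
 * as a writer and treats any return value other than the requested state
 * as a rejected transition, exactly as the handlers later in this file do:
 *
 *	enum mhi_pm_state cur_state;
 *
 *	write_lock_irq(&mhi_cntrl->pm_lock);
 *	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
 *	write_unlock_irq(&mhi_cntrl->pm_lock);
 *	if (cur_state != MHI_PM_M0)
 *		return -EIO;
 */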
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	if (state == MHI_STATE_RESET) {
		ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					  MHICTRL_RESET_MASK, 1);
	} else {
		ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					  MHICTRL_MHISTATE_MASK, state);
	}

	if (ret)
		dev_err(dev, "Failed to set MHI state to: %s\n",
			mhi_state_str(state));
}
/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}
/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
	u32 timeout_ms;
	int ret, i;

	/* Check if device entered error state */
	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device link is not accessible\n");
		return -EIO;
	}

	/* Wait for RESET to be cleared and READY bit to be set by the device */
	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				 MHICTRL_RESET_MASK, 0, interval_us,
				 mhi_cntrl->timeout_ms);
	if (ret) {
		dev_err(dev, "Device failed to clear MHI Reset\n");
		return ret;
	}

	timeout_ms = mhi_cntrl->ready_timeout_ms ?
		mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				 MHISTATUS_READY_MASK, 1, interval_us,
				 timeout_ms);
	if (ret) {
		dev_err(dev, "Device failed to enter MHI Ready\n");
		return ret;
	}

	dev_dbg(dev, "Device in READY State\n");
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
	mhi_cntrl->dev_state = MHI_STATE_READY;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_POR) {
		dev_err(dev, "Error moving to state %s from %s\n",
			to_mhi_pm_state_str(MHI_PM_POR),
			to_mhi_pm_state_str(cur_state));
		return -EIO;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device registers not accessible\n");
		goto error_mmio;
	}

	/* Configure MMIO registers */
	ret = mhi_init_mmio(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Error configuring MMIO registers\n");
		goto error_mmio;
	}

	/* Add elements to all SW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if this is an offload or HW event */
		if (mhi_event->offload_ev || mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
		/* Update all cores */
		smp_wmb();

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	/* Set MHI to M0 state */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

error_mmio:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return -EIO;
}
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_M0)) {
		dev_err(dev, "Unable to transition to M0 state\n");
		return -EIO;
	}
	mhi_cntrl->M0++;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* Ring all event rings and CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
		struct mhi_cmd *mhi_cmd =
			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
			if (mhi_event->offload_ev)
				continue;

			spin_lock_irq(&mhi_event->lock);
			mhi_ring_er_db(mhi_event);
			spin_unlock_irq(&mhi_event->lock);
		}

		/* Only ring primary cmd ring if ring is not empty */
		spin_lock_irq(&mhi_cmd->lock);
		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
		spin_unlock_irq(&mhi_cmd->lock);
	}

	/* Ring channel DB registers */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

		if (mhi_chan->db_cfg.reset_req) {
			write_lock_irq(&mhi_chan->lock);
			mhi_chan->db_cfg.db_mode = true;
			write_unlock_irq(&mhi_chan->lock);
		}

		read_lock_irq(&mhi_chan->lock);

		/* Only ring DB if ring is not empty */
		if (tre_ring->base && tre_ring->wp != tre_ring->rp &&
		    mhi_chan->ch_state == MHI_CH_STATE_ENABLED)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irq(&mhi_chan->lock);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}
/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1 state, the host can transition the device to M2 state
 * for keeping it in low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
	if (state == MHI_PM_M2) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
		mhi_cntrl->dev_state = MHI_STATE_M2;

		write_unlock_irq(&mhi_cntrl->pm_lock);

		mhi_cntrl->M2++;
		wake_up_all(&mhi_cntrl->state_event);

		/* If there are any pending resources, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			dev_dbg(dev,
				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
				atomic_read(&mhi_cntrl->pending_pkts),
				atomic_read(&mhi_cntrl->dev_wake));
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);
		} else {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
		}
	} else {
		write_unlock_irq(&mhi_cntrl->pm_lock);
	}
}
/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M3;
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (state != MHI_PM_M3) {
		dev_err(dev, "Unable to transition to M3 state\n");
		return -EIO;
	}

	mhi_cntrl->M3++;
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}
/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
	int i, ret;

	dev_dbg(dev, "Processing Mission Mode transition\n");

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		ee = mhi_get_exec_env(mhi_cntrl);

	if (!MHI_IN_MISSION_MODE(ee)) {
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);
		return -EIO;
	}
	mhi_cntrl->ee = ee;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	wake_up_all(&mhi_cntrl->state_event);

	device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
			      mhi_destroy_device);
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

	/* Force MHI to be in M0 state before continuing */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (ret)
		return ret;

	read_lock_bh(&mhi_cntrl->pm_lock);

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_mission_mode;
	}

	/* Add elements to all HW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev || !mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
		/* Update to all cores */
		smp_wmb();

		spin_lock_irq(&mhi_event->lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	/*
	 * The MHI devices are only created when the client device switches its
	 * Execution Environment (EE) to either SBL or AMSS states
	 */
	mhi_create_devices(mhi_cntrl);

	read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}
/* Handle shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
				      bool destroy_device)
{
	enum mhi_pm_state cur_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Processing disable transition with PM state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state));

	mutex_lock(&mhi_cntrl->pm_mutex);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		/* Skip MHI RESET if in RDDM state */
		if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
			goto skip_mhi_reset;

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					 MHICTRL_RESET_MASK, 0, 25000, mhi_cntrl->timeout_ms);
		if (ret)
			dev_err(dev, "Device failed to clear MHI Reset\n");

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);

		if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
			/* wait for ready to be set */
			ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
						 MHISTATUS, MHISTATUS_READY_MASK,
						 1, 25000, mhi_cntrl->timeout_ms);
			if (ret)
				dev_err(dev, "Device failed to enter READY state\n");
		}
	}

skip_mhi_reset:
	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		disable_irq(mhi_cntrl->irq[mhi_event->irq]);
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	/*
	 * Only destroy the 'struct device' for channels if indicated by the
	 * 'destroy_device' flag, because during system suspend or hibernation
	 * the endpoint device is still physically attached to the machine and
	 * there is no need to destroy its 'struct device'.
	 */
	if (destroy_device) {
		dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
		device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
	}

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Move to disable state */
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_DISABLE))
		dev_err(dev, "Error moving from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_DISABLE));

	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}
/* Handle system error transitions */
static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state, prev_state;
	enum dev_st_transition next_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	bool reset_device = false;
	int ret, i;

	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));

	/* We must notify MHI control driver so it can clean up first */
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	prev_state = mhi_cntrl->pm_state;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
		dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
		goto exit_sys_error_transition;
	}

	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	/* Wake up threads waiting for state transition */
	wake_up_all(&mhi_cntrl->state_event);

	if (MHI_REG_ACCESS_VALID(prev_state)) {
		/*
		 * If the device is in PBL or SBL, it will only respond to
		 * RESET if the device is in SYSERR state. SYSERR might
		 * already be cleared at this point.
		 */
		enum mhi_state cur_state = mhi_get_mhi_state(mhi_cntrl);
		enum mhi_ee_type cur_ee = mhi_get_exec_env(mhi_cntrl);

		if (cur_state == MHI_STATE_SYS_ERR)
			reset_device = true;
		else if (cur_ee != MHI_EE_PBL && cur_ee != MHI_EE_SBL)
			reset_device = true;
	}

	/* Trigger MHI RESET so that the device will not access host memory */
	if (reset_device) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    &in_reset) ||
					 !in_reset, timeout);
		if (!ret || in_reset) {
			dev_err(dev, "Device failed to exit MHI Reset state\n");
			write_lock_irq(&mhi_cntrl->pm_lock);
			cur_state = mhi_tryset_pm_state(mhi_cntrl,
							MHI_PM_SYS_ERR_FAIL);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			/* Shutdown may have occurred, otherwise cleanup now */
			if (cur_state != MHI_PM_SYS_ERR_FAIL)
				goto exit_sys_error_transition;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Transition to next state */
	if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (cur_state != MHI_PM_POR) {
			dev_err(dev, "Error moving to state %s from %s\n",
				to_mhi_pm_state_str(MHI_PM_POR),
				to_mhi_pm_state_str(cur_state));
			goto exit_sys_error_transition;
		}
		next_state = DEV_ST_TRANSITION_PBL;
	} else {
		next_state = DEV_ST_TRANSITION_READY;
	}

	mhi_queue_state_transition(mhi_cntrl, next_state);

exit_sys_error_transition:
	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}
/* Queue a new work item and schedule work */
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state)
{
	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
	unsigned long flags;

	if (!item)
		return -ENOMEM;

	item->state = state;
	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
	list_add_tail(&item->node, &mhi_cntrl->transition_list);
	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

	queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);

	return 0;
}
/* SYS_ERR worker */
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* skip if controller supports RDDM */
	if (mhi_cntrl->rddm_image) {
		dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
		return;
	}

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}
/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
	struct state_transition *itr, *tmp;
	LIST_HEAD(head);
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							st_worker);

	spin_lock_irq(&mhi_cntrl->transition_lock);
	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
	spin_unlock_irq(&mhi_cntrl->transition_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);

		trace_mhi_pm_st_transition(mhi_cntrl, itr->state);

		switch (itr->state) {
		case DEV_ST_TRANSITION_PBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_fw_load_handler(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_SBL;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			/*
			 * The MHI devices are only created when the client
			 * device switches its Execution Environment (EE) to
			 * either SBL or AMSS states
			 */
			mhi_create_devices(mhi_cntrl);
			if (mhi_cntrl->fbc_download)
				mhi_download_amss_image(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_MISSION_MODE:
			mhi_pm_mission_mode_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_FP:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_FP;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_create_devices(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_READY:
			mhi_ready_state_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SYS_ERR:
			mhi_pm_sys_error_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_DISABLE:
			mhi_pm_disable_transition(mhi_cntrl, false);
			break;
		case DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE:
			mhi_pm_disable_transition(mhi_cntrl, true);
			break;
		default:
			break;
		}
		kfree(itr);
	}
}
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state new_state;
	int ret;

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return -EINVAL;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Return busy if there are any pending resources */
	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts))
		return -EBUSY;

	/* Take MHI out of M2 state */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Could not enter M0/M1 state");
		return -EIO;
	}

	write_lock_irq(&mhi_cntrl->pm_lock);

	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		return -EBUSY;
	}

	dev_dbg(dev, "Allowing M3 transition\n");
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
	if (new_state != MHI_PM_M3_ENTER) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_err(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M3 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	dev_dbg(dev, "Waiting for M3 completion\n");

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M3 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M3 state, MHI state: %s, PM state: %s\n",
			mhi_state_str(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Notify clients about entering LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
		mutex_unlock(&itr->mutex);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);
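/*
 * Editorial note: a hedged sketch of assumed controller glue code (not
 * part of this file). A bus driver would typically call mhi_pm_suspend()
 * and mhi_pm_resume() from its system sleep callbacks, retrying or
 * aborting suspend on -EBUSY, which is returned above while wake votes
 * or packets are still pending:
 *
 *	static int foo_suspend(struct device *dev)	// hypothetical name
 *	{
 *		struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
 *
 *		return mhi_pm_suspend(mhi_cntrl);
 *	}
 */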
static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state cur_state;
	int ret;

	dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state));

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return 0;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
		dev_warn(dev, "Resuming from non M3 state (%s)\n",
			 mhi_state_str(mhi_get_mhi_state(mhi_cntrl)));
		if (!force)
			return -EINVAL;
	}

	/* Notify clients about exiting LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
		mutex_unlock(&itr->mutex);
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
	if (cur_state != MHI_PM_M3_EXIT) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_info(dev,
			 "Error setting to PM state: %s from: %s\n",
			 to_mhi_pm_state_str(MHI_PM_M3_EXIT),
			 to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M0 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M2 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M0 state, MHI state: %s, PM state: %s\n",
			mhi_state_str(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	return 0;
}

int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
	return __mhi_pm_resume(mhi_cntrl, false);
}
EXPORT_SYMBOL_GPL(mhi_pm_resume);

int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
{
	return __mhi_pm_resume(mhi_cntrl, true);
}
EXPORT_SYMBOL_GPL(mhi_pm_resume_force);
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}
	mhi_cntrl->wake_get(mhi_cntrl, true);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->pm_state == MHI_PM_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	return 0;
}
/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
	unsigned long flags;

	/*
	 * If force flag is set, then increment the wake count value and
	 * ring wake db
	 */
	if (unlikely(force)) {
		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		atomic_inc(&mhi_cntrl->dev_wake);
		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	} else {
		/*
		 * If resources are already requested, then just increment
		 * the wake count value and return
		 */
		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
			return;

		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	}
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
				  bool override)
{
	unsigned long flags;

	/*
	 * Only continue if there is a single resource, else just decrement
	 * and return
	 */
	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
		return;

	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
	    mhi_cntrl->wake_set) {
		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
		mhi_cntrl->wake_set = false;
	}
	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}
int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	enum mhi_state state;
	enum mhi_ee_type current_ee;
	enum dev_st_transition next_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
	int ret, i;

	dev_info(dev, "Requested to power ON\n");

	/* Supply default wake routines if not provided by controller driver */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	mhi_cntrl->pm_state = MHI_PM_DISABLE;

	/* Setup BHI INTVEC */
	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	mhi_cntrl->pm_state = MHI_PM_POR;
	mhi_cntrl->ee = MHI_EE_MAX;
	current_ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Confirm that the device is in valid exec env */
	if (!MHI_POWER_UP_CAPABLE(current_ee)) {
		dev_err(dev, "%s is not a valid EE for power on\n",
			TO_MHI_EXEC_STR(current_ee));
		ret = -EIO;
		goto error_exit;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n",
		TO_MHI_EXEC_STR(current_ee), mhi_state_str(state));

	if (state == MHI_STATE_SYS_ERR) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					 MHICTRL_RESET_MASK, 0, interval_us,
					 mhi_cntrl->timeout_ms);
		if (ret) {
			dev_info(dev, "Failed to reset MHI due to syserr state\n");
			goto error_exit;
		}

		/*
		 * Device clears BHI_INTVEC as part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	/* IRQs have been requested during probe, so we just need to enable them. */
	enable_irq(mhi_cntrl->irq[0]);

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		enable_irq(mhi_cntrl->irq[mhi_event->irq]);
	}

	/* Transition to next state */
	next_state = MHI_IN_PBL(current_ee) ?
		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	dev_info(dev, "Power on setup success\n");

	return 0;

error_exit:
	mhi_cntrl->pm_state = MHI_PM_DISABLE;
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);
static void __mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful,
			     bool destroy_device)
{
	enum mhi_pm_state cur_state, transition_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_cntrl->pm_state;
	if (cur_state == MHI_PM_DISABLE) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return; /* Already powered down */
	}

	/* If it's not a graceful shutdown, force MHI to linkdown state */
	transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
			   MHI_PM_LD_ERR_FATAL_DETECT;

	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
	if (cur_state != transition_state) {
		dev_err(dev, "Failed to move to state: %s from: %s\n",
			to_mhi_pm_state_str(transition_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		/* Force link down or error fatal detected state */
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
	}

	/* mark device inactive to avoid any further host processing */
	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	wake_up_all(&mhi_cntrl->state_event);

	write_unlock_irq(&mhi_cntrl->pm_lock);
	mutex_unlock(&mhi_cntrl->pm_mutex);

	if (destroy_device)
		mhi_queue_state_transition(mhi_cntrl,
					   DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE);
	else
		mhi_queue_state_transition(mhi_cntrl,
					   DEV_ST_TRANSITION_DISABLE);

	/* Wait for shutdown to complete */
	flush_work(&mhi_cntrl->st_worker);

	disable_irq(mhi_cntrl->irq[0]);
}

void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
	__mhi_power_down(mhi_cntrl, graceful, true);
}
EXPORT_SYMBOL_GPL(mhi_power_down);

void mhi_power_down_keep_dev(struct mhi_controller *mhi_cntrl,
			     bool graceful)
{
	__mhi_power_down(mhi_cntrl, graceful, false);
}
EXPORT_SYMBOL_GPL(mhi_power_down_keep_dev);
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_async_power_up(mhi_cntrl);
	u32 timeout_ms;

	if (ret)
		return ret;

	/* Some devices need more time to set ready during power up */
	timeout_ms = mhi_cntrl->ready_timeout_ms ?
		mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
			   msecs_to_jiffies(timeout_ms));

	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
	if (ret)
		mhi_power_down(mhi_cntrl, false);

	return ret;
}
EXPORT_SYMBOL(mhi_sync_power_up);
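/*
 * Editorial note: an assumed bring-up sequence (not part of this file)
 * pairing mhi_sync_power_up()/mhi_power_down() with the registration and
 * preparation helpers declared in <linux/mhi.h>:
 *
 *	ret = mhi_register_controller(mhi_cntrl, config);
 *	if (ret)
 *		return ret;
 *	ret = mhi_prepare_for_power_up(mhi_cntrl);
 *	if (ret)
 *		goto err_unregister;
 *	ret = mhi_sync_power_up(mhi_cntrl);	// blocks until mission mode
 *	...
 *	mhi_power_down(mhi_cntrl, true);	// graceful shutdown
 *	mhi_unprepare_after_power_down(mhi_cntrl);
 */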
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Check if device is already in RDDM */
	if (mhi_cntrl->ee == MHI_EE_RDDM)
		return 0;

	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

	/* Wait for RDDM event */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->ee == MHI_EE_RDDM,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	ret = ret ? 0 : -EIO;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);
int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int ret;

	ret = __mhi_device_get_sync(mhi_cntrl);
	if (!ret)
		mhi_dev->dev_wake++;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

void mhi_device_put(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake--;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);
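/*
 * Editorial note: an assumed client-driver usage sketch (not part of this
 * file). Clients pair mhi_device_get_sync() with mhi_device_put() around
 * bursts of traffic so the device wake vote is dropped once idle:
 *
 *	ret = mhi_device_get_sync(mhi_dev);
 *	if (ret)
 *		return ret;
 *	// ... queue transfers via mhi_queue_buf()/mhi_queue_skb() ...
 *	mhi_device_put(mhi_dev);
 */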