GitHub Repository: torvalds/linux
Path: blob/master/drivers/base/power/wakeirq.c
// SPDX-License-Identifier: GPL-2.0
/* Device wakeirq helper functions */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>

#include "power.h"

/**
 * dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
 * @dev: Device entry
 * @wirq: Wake irq specific data
 *
 * Internal function to attach a dedicated wake-up interrupt as a wake IRQ.
 */
static int dev_pm_attach_wake_irq(struct device *dev, struct wake_irq *wirq)
{
	unsigned long flags;

	if (!dev || !wirq)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (dev_WARN_ONCE(dev, dev->power.wakeirq,
			  "wake irq already initialized\n")) {
		spin_unlock_irqrestore(&dev->power.lock, flags);
		return -EEXIST;
	}

	dev->power.wakeirq = wirq;
	device_wakeup_attach_irq(dev, wirq);

	spin_unlock_irqrestore(&dev->power.lock, flags);
	return 0;
}

/**
 * dev_pm_set_wake_irq - Attach device IO interrupt as wake IRQ
 * @dev: Device entry
 * @irq: Device IO interrupt
 *
 * Attach a device IO interrupt as a wake IRQ. The wake IRQ gets
 * automatically configured for wake-up from suspend based
 * on the device specific sysfs wakeup entry. Typically called
 * during driver probe after calling device_init_wakeup().
 */
int dev_pm_set_wake_irq(struct device *dev, int irq)
{
	struct wake_irq *wirq;
	int err;

	if (irq < 0)
		return -EINVAL;

	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
	if (!wirq)
		return -ENOMEM;

	wirq->dev = dev;
	wirq->irq = irq;

	err = dev_pm_attach_wake_irq(dev, wirq);
	if (err)
		kfree(wirq);

	return err;
}
EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);
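
/*
 * Illustrative sketch (not part of the upstream file): a hypothetical
 * driver probe attaching its IO interrupt as a wake IRQ, as described in
 * the kerneldoc above. foo_probe and the platform_device usage are
 * assumptions for the example only; a real driver would also need
 * <linux/platform_device.h>.
 */
#if 0
static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);
	int err;

	if (irq < 0)
		return irq;

	/* Mark the device wakeup-capable before attaching the wake IRQ */
	device_init_wakeup(&pdev->dev, true);

	err = dev_pm_set_wake_irq(&pdev->dev, irq);
	if (err)
		device_init_wakeup(&pdev->dev, false);

	return err;
}
#endif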

/**
 * dev_pm_clear_wake_irq - Detach a device IO interrupt wake IRQ
 * @dev: Device entry
 *
 * Detach a device wake IRQ and free resources.
 *
 * Note that it's OK for drivers to call this without calling
 * dev_pm_set_wake_irq() first, as not every driver instance may have
 * a wake IRQ configured. This avoids adding wake IRQ specific
 * checks into the drivers.
 */
void dev_pm_clear_wake_irq(struct device *dev)
{
	struct wake_irq *wirq;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);
	wirq = dev->power.wakeirq;
	if (!wirq) {
		spin_unlock_irqrestore(&dev->power.lock, flags);
		return;
	}

	device_wakeup_detach_irq(dev);
	dev->power.wakeirq = NULL;
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
		free_irq(wirq->irq, wirq);
		wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
	}
	kfree(wirq->name);
	kfree(wirq);
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
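
/*
 * Illustrative sketch (not part of the upstream file): a hypothetical
 * driver remove path releasing the wake IRQ set up in probe. As noted
 * above, the call is safe even if no wake IRQ was ever attached.
 * foo_remove is an assumption for the example only.
 */
#if 0
static void foo_remove(struct platform_device *pdev)
{
	dev_pm_clear_wake_irq(&pdev->dev);
	device_init_wakeup(&pdev->dev, false);
}
#endif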

static void devm_pm_clear_wake_irq(void *dev)
{
	dev_pm_clear_wake_irq(dev);
}

/**
 * devm_pm_set_wake_irq - device-managed variant of dev_pm_set_wake_irq
 * @dev: Device entry
 * @irq: Device IO interrupt
 *
 * Attach a device IO interrupt as a wake IRQ, like dev_pm_set_wake_irq(),
 * but the wake IRQ is automatically cleared on driver detach.
 */
int devm_pm_set_wake_irq(struct device *dev, int irq)
{
	int ret;

	ret = dev_pm_set_wake_irq(dev, irq);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_pm_clear_wake_irq, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_set_wake_irq);
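
/*
 * Illustrative sketch (not part of the upstream file): with the
 * device-managed variant the explicit cleanup in the remove path becomes
 * unnecessary. foo_probe_managed is an assumption for the example only.
 */
#if 0
static int foo_probe_managed(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	device_init_wakeup(&pdev->dev, true);

	/* Wake IRQ is released automatically when the driver detaches */
	return devm_pm_set_wake_irq(&pdev->dev, irq);
}
#endif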

/**
 * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
 * @irq: Device specific dedicated wake-up interrupt
 * @_wirq: Wake IRQ data
 *
 * Some devices have a separate wake-up interrupt in addition to the
 * device IO interrupt. The wake-up interrupt signals that a device
 * should be woken up from its idle state. This handler uses device
 * specific pm_runtime functions to wake the device, and then it's
 * up to the device to do whatever it needs to. Note that as the
 * device may need to restore context and start up regulators, we
 * use a threaded IRQ.
 *
 * Also note that we are not resending the lost device interrupts.
 * We assume that the wake-up interrupt just needs to wake up the
 * device, and then the device's pm_runtime_resume() can deal with the
 * situation.
 */
static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
{
	struct wake_irq *wirq = _wirq;
	int res;

	/* Maybe abort suspend? */
	if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
		pm_wakeup_event(wirq->dev, 0);

		return IRQ_HANDLED;
	}

	/* We don't want RPM_ASYNC or RPM_NOWAIT here */
	res = pm_runtime_resume(wirq->dev);
	if (res < 0)
		dev_warn(wirq->dev,
			 "wake IRQ with no resume: %i\n", res);

	return IRQ_HANDLED;
}

static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
{
	struct wake_irq *wirq;
	int err;

	if (irq < 0)
		return -EINVAL;

	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
	if (!wirq)
		return -ENOMEM;

	wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev));
	if (!wirq->name) {
		err = -ENOMEM;
		goto err_free;
	}

	wirq->dev = dev;
	wirq->irq = irq;

	/* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

	/*
	 * Consumer device may need to power up and restore state
	 * so we use a threaded irq.
	 */
	err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
				   IRQF_ONESHOT | IRQF_NO_AUTOEN,
				   wirq->name, wirq);
	if (err)
		goto err_free_name;

	err = dev_pm_attach_wake_irq(dev, wirq);
	if (err)
		goto err_free_irq;

	wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;

	return err;

err_free_irq:
	free_irq(irq, wirq);
err_free_name:
	kfree(wirq->name);
err_free:
	kfree(wirq);

	return err;
}

/**
 * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
 * @dev: Device entry
 * @irq: Device wake-up interrupt
 *
 * Unless your hardware has separate wake-up interrupts in addition
 * to the device IO interrupts, you don't need this.
 *
 * Sets up a threaded interrupt handler for a device that has
 * a dedicated wake-up interrupt in addition to the device IO
 * interrupt.
 */
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
	return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
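
/*
 * Illustrative sketch (not part of the upstream file): a hypothetical
 * driver whose hardware has a separate wake-up line in addition to its
 * IO interrupt, e.g. listed as a named interrupt in device tree. The
 * "wakeup" interrupt name and foo_probe_dedicated are assumptions for
 * the example only.
 */
#if 0
static int foo_probe_dedicated(struct platform_device *pdev)
{
	int wakeirq = platform_get_irq_byname(pdev, "wakeup");

	if (wakeirq < 0)
		return wakeirq;

	device_init_wakeup(&pdev->dev, true);

	/* The dedicated wake IRQ stays disabled until runtime PM enables it */
	return dev_pm_set_dedicated_wake_irq(&pdev->dev, wakeirq);
}
#endif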

/**
 * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
 *					   with reverse enable ordering
 * @dev: Device entry
 * @irq: Device wake-up interrupt
 *
 * Unless your hardware has separate wake-up interrupts in addition
 * to the device IO interrupts, you don't need this.
 *
 * Sets up a threaded interrupt handler for a device that has a dedicated
 * wake-up interrupt in addition to the device IO interrupt. It sets
 * the WAKE_IRQ_DEDICATED_REVERSE status flag to tell rpm_suspend() to
 * enable the dedicated wake-up interrupt after running the runtime suspend
 * callback for @dev.
 */
int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
{
	return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);

/**
 * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
 * @dev: Device
 * @can_change_status: Can change wake-up interrupt status
 *
 * Enables the wakeirq conditionally. We need to enable the wake-up interrupt
 * lazily on the first rpm_suspend(). This is needed as the consumer device
 * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
 * otherwise try to disable an already disabled wakeirq. The wake-up interrupt
 * starts disabled with IRQ_NOAUTOEN set.
 *
 * Should only be called from the rpm_suspend() and rpm_resume() paths.
 * The caller must hold &dev->power.lock to change wirq->status.
 */
void dev_pm_enable_wake_irq_check(struct device *dev,
				  bool can_change_status)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
		goto enable;
	} else if (can_change_status) {
		wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
		goto enable;
	}

	return;

enable:
	if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) {
		enable_irq(wirq->irq);
		wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
	}
}

/**
 * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
 * @dev: Device
 * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE
 *
 * Disables the wake-up interrupt conditionally based on status.
 * Should only be called from the rpm_suspend() and rpm_resume() paths.
 */
void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
		return;

	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) {
		wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED;
		disable_irq_nosync(wirq->irq);
	}
}

/**
 * dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before
 * @dev: Device using the wake IRQ
 *
 * Enables the wake IRQ conditionally based on status, mainly used when the
 * wake IRQ should only be enabled after running ->runtime_suspend(), as
 * requested by WAKE_IRQ_DEDICATED_REVERSE.
 *
 * Should only be called from the rpm_suspend() path.
 */
void dev_pm_enable_wake_irq_complete(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
	    wirq->status & WAKE_IRQ_DEDICATED_REVERSE) {
		enable_irq(wirq->irq);
		wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
	}
}

/**
 * dev_pm_arm_wake_irq - Arm device wake-up
 * @wirq: Device wake-up interrupt
 *
 * Sets up the wake-up event conditionally based on
 * device_may_wakeup().
 */
void dev_pm_arm_wake_irq(struct wake_irq *wirq)
{
	if (!wirq)
		return;

	if (device_may_wakeup(wirq->dev)) {
		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
		    !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
			enable_irq(wirq->irq);

		enable_irq_wake(wirq->irq);
	}
}

/**
 * dev_pm_disarm_wake_irq - Disarm device wake-up
 * @wirq: Device wake-up interrupt
 *
 * Clears the wake-up event conditionally based on
 * device_may_wakeup().
 */
void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{
	if (!wirq)
		return;

	if (device_may_wakeup(wirq->dev)) {
		disable_irq_wake(wirq->irq);

		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
		    !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
			disable_irq_nosync(wirq->irq);
	}
}