GitHub Repository: torvalds/linux
Path: blob/master/drivers/cpuidle/cpuidle-riscv-sbi.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * RISC-V SBI CPU idle driver.
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt

#include <linux/cleanup.h>
#include <linux/cpuhotplug.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/cpu_cooling.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <asm/cpuidle.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/suspend.h>

#include "cpuidle.h"
#include "dt_idle_states.h"
#include "dt_idle_genpd.h"

struct sbi_cpuidle_data {
	u32 *states;
	struct device *dev;
};

struct sbi_domain_state {
	bool available;
	u32 state;
};

static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
static bool sbi_cpuidle_use_osi;
static bool sbi_cpuidle_use_cpuhp;

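/*
 * The genpd ->power_off callback (sbi_cpuidle_pd_power_off below) records the
 * idle state selected for this CPU's domain; the idle-entry path consumes it
 * through these helpers, preferring it over the per-CPU state.
 */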
static inline void sbi_set_domain_state(u32 state)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	data->available = true;
	data->state = state;
}

static inline u32 sbi_get_domain_state(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	return data->state;
}

static inline void sbi_clear_domain_state(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	data->available = false;
}

static inline bool sbi_is_domain_state_available(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	return data->available;
}

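/*
 * States with SBI_HSM_SUSP_NON_RET_BIT set are non-retentive: the hart loses
 * context across the suspend, so the full CPU PM notifier path must run.
 * Retentive states preserve context and take the cheaper RETENTION path.
 */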
static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
					     struct cpuidle_driver *drv, int idx)
{
	u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
	u32 state = states[idx];

	if (state & SBI_HSM_SUSP_NON_RET_BIT)
		return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend, idx, state);
	else
		return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend,
							     idx, state);
}

static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
						   struct cpuidle_driver *drv, int idx,
						   bool s2idle)
{
	struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
	u32 *states = data->states;
	struct device *pd_dev = data->dev;
	u32 state;
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return -1;

	/* Do runtime PM to manage a hierarchical CPU topology. */
	if (s2idle)
		dev_pm_genpd_suspend(pd_dev);
	else
		pm_runtime_put_sync_suspend(pd_dev);

	ct_cpuidle_enter();

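	/* Prefer a domain-wide state chosen by genpd over the per-CPU one. */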
	if (sbi_is_domain_state_available())
		state = sbi_get_domain_state();
	else
		state = states[idx];

	ret = riscv_sbi_hart_suspend(state) ? -1 : idx;

	ct_cpuidle_exit();

	if (s2idle)
		dev_pm_genpd_resume(pd_dev);
	else
		pm_runtime_get_sync(pd_dev);

	cpu_pm_exit();

	/* Clear the domain state to start fresh when back from idle. */
	sbi_clear_domain_state();
	return ret;
}

static int sbi_enter_domain_idle_state(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, false);
}

static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
					      struct cpuidle_driver *drv,
					      int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, true);
}

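/*
 * The hotplug callbacks keep the runtime PM reference on the CPU's PM domain
 * balanced: an online CPU holds a reference so the domain stays powered, and
 * taking the CPU offline drops it so the domain may be powered off.
 */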
static int sbi_cpuidle_cpuhp_up(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

	if (pd_dev)
		pm_runtime_get_sync(pd_dev);

	return 0;
}

static int sbi_cpuidle_cpuhp_down(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

	if (pd_dev) {
		pm_runtime_put_sync(pd_dev);
		/* Clear domain state to start fresh at next online. */
		sbi_clear_domain_state();
	}

	return 0;
}

static void sbi_idle_init_cpuhp(void)
{
	int err;

	if (!sbi_cpuidle_use_cpuhp)
		return;

	err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
					"cpuidle/sbi:online",
					sbi_cpuidle_cpuhp_up,
					sbi_cpuidle_cpuhp_down);
	if (err)
		pr_warn("Failed to set up cpuhp state, error %d\n", err);
}

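/*
 * Illustrative device-tree fragment (not part of this file) for an idle
 * state matched below; the property names follow the common idle-states
 * binding and the values here are made up:
 *
 *	cpu-sleep: cpu-sleep {
 *		compatible = "riscv,idle-state";
 *		riscv,sbi-suspend-param = <0x80000000>;
 *		entry-latency-us = <100>;
 *		exit-latency-us = <200>;
 *		min-residency-us = <1000>;
 *	};
 */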
static const struct of_device_id sbi_cpuidle_state_match[] = {
	{ .compatible = "riscv,idle-state",
	  .data = sbi_cpuidle_enter_state },
	{ },
};

static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
{
	int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);

	if (err) {
		pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np);
		return err;
	}

	if (!riscv_sbi_suspend_state_is_valid(*state)) {
		pr_warn("Invalid SBI suspend state %#x\n", *state);
		return -EINVAL;
	}

	return 0;
}

static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
				    struct sbi_cpuidle_data *data,
				    unsigned int state_count, int cpu)
{
	/* Currently limit the hierarchical topology to be used in OSI mode. */
	if (!sbi_cpuidle_use_osi)
		return 0;

	data->dev = dt_idle_attach_cpu(cpu, "sbi");
	if (IS_ERR_OR_NULL(data->dev))
		return PTR_ERR_OR_ZERO(data->dev);

	/*
	 * Use the deepest state for the CPU to trigger a potential selection
	 * of a shared state for the domain; this assumes the domain states
	 * are all deeper than the CPU's own states.
	 */
	drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
	drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
	drv->states[state_count - 1].enter_s2idle =
		sbi_enter_s2idle_domain_idle_state;
	sbi_cpuidle_use_cpuhp = true;

	return 0;
}

static int sbi_cpuidle_dt_init_states(struct device *dev,
				      struct cpuidle_driver *drv,
				      unsigned int cpu,
				      unsigned int state_count)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
	struct device_node *state_node;
	u32 *states;
	int i, ret;

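	/* __free(device_node) drops the node reference at scope exit. */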
	struct device_node *cpu_node __free(device_node) = of_cpu_device_node_get(cpu);
	if (!cpu_node)
		return -ENODEV;

	states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
	if (!states)
		return -ENOMEM;

	/* Parse SBI specific details from state DT nodes */
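	/* State index 0 is the architectural WFI state; DT states start at 1. */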
	for (i = 1; i < state_count; i++) {
		state_node = of_get_cpu_state_node(cpu_node, i - 1);
		if (!state_node)
			break;

		ret = sbi_dt_parse_state_node(state_node, &states[i]);
		of_node_put(state_node);

		if (ret)
			return ret;

		pr_debug("sbi-state %#x index %d\n", states[i], i);
	}
	if (i != state_count)
		return -ENODEV;

	/* Initialize optional data, used for the hierarchical topology. */
	ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
	if (ret < 0)
		return ret;

	/* Store states in the per-cpu struct. */
	data->states = states;

	return 0;
}

static void sbi_cpuidle_deinit_cpu(int cpu)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);

	dt_idle_detach_cpu(data->dev);
	sbi_cpuidle_use_cpuhp = false;
}

static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
{
	struct cpuidle_driver *drv;
	unsigned int state_count = 0;
	int ret = 0;

	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	drv->name = "sbi_cpuidle";
	drv->owner = THIS_MODULE;
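	/* One driver instance per CPU, so DT idle states may differ per CPU. */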
	drv->cpumask = (struct cpumask *)cpumask_of(cpu);

	/* RISC-V architectural WFI to be represented as state index 0. */
	drv->states[0].enter = sbi_cpuidle_enter_state;
	drv->states[0].exit_latency = 1;
	drv->states[0].target_residency = 1;
	drv->states[0].power_usage = UINT_MAX;
	strscpy(drv->states[0].name, "WFI");
	strscpy(drv->states[0].desc, "RISC-V WFI");

	/*
	 * If no DT idle states are detected (ret == 0), let the driver
	 * initialization fail accordingly, since there is no reason to
	 * initialize the idle driver if only WFI is supported; the default
	 * architectural back-end already executes WFI on idle entry.
	 */
	ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1);
	if (ret <= 0) {
		pr_debug("HART%ld: failed to parse DT idle states\n",
			 cpuid_to_hartid_map(cpu));
		return ret ? : -ENODEV;
	}
	state_count = ret + 1; /* Include WFI state as well */

	/* Initialize idle states from DT. */
	ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count);
	if (ret) {
		pr_err("HART%ld: failed to init idle states\n",
		       cpuid_to_hartid_map(cpu));
		return ret;
	}

	if (cpuidle_disabled())
		return 0;

	ret = cpuidle_register(drv, NULL);
	if (ret)
		goto deinit;

	cpuidle_cooling_register(drv);

	return 0;
deinit:
	sbi_cpuidle_deinit_cpu(cpu);
	return ret;
}

#ifdef CONFIG_DT_IDLE_GENPD

static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
{
	struct genpd_power_state *state = &pd->states[pd->state_idx];
	u32 *pd_state;

	if (!state->data)
		return 0;

	/* OSI mode is enabled, set the corresponding domain state. */
	pd_state = state->data;
	sbi_set_domain_state(*pd_state);

	return 0;
}

struct sbi_pd_provider {
	struct list_head link;
	struct device_node *node;
};

static LIST_HEAD(sbi_pd_providers);

static int sbi_pd_init(struct device_node *np)
{
	struct generic_pm_domain *pd;
	struct sbi_pd_provider *pd_provider;
	struct dev_power_governor *pd_gov;
	int ret = -ENOMEM;

	pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node);
	if (!pd)
		goto out;

	pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
	if (!pd_provider)
		goto free_pd;

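	/*
	 * IRQ_SAFE: the domain is powered off from the idle path with IRQs
	 * disabled; CPU_DOMAIN: the domain's devices are CPUs, which lets
	 * the CPU genpd governor account for idle residency.
	 */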
	pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;

	/* Allow power off when OSI is available. */
	if (sbi_cpuidle_use_osi)
		pd->power_off = sbi_cpuidle_pd_power_off;
	else
		pd->flags |= GENPD_FLAG_ALWAYS_ON;

	/* Use governor for CPU PM domains if it has some states to manage. */
	pd_gov = pd->states ? &pm_domain_cpu_gov : NULL;

	ret = pm_genpd_init(pd, pd_gov, false);
	if (ret)
		goto free_pd_prov;

	ret = of_genpd_add_provider_simple(np, pd);
	if (ret)
		goto remove_pd;

	pd_provider->node = of_node_get(np);
	list_add(&pd_provider->link, &sbi_pd_providers);

	pr_debug("init PM domain %s\n", pd->name);
	return 0;

remove_pd:
	pm_genpd_remove(pd);
free_pd_prov:
	kfree(pd_provider);
free_pd:
	dt_idle_pd_free(pd);
out:
	pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
	return ret;
}

static void sbi_pd_remove(void)
{
	struct sbi_pd_provider *pd_provider, *it;
	struct generic_pm_domain *genpd;

	list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) {
		of_genpd_del_provider(pd_provider->node);

		genpd = of_genpd_remove_last(pd_provider->node);
		if (!IS_ERR(genpd))
			kfree(genpd);

		of_node_put(pd_provider->node);
		list_del(&pd_provider->link);
		kfree(pd_provider);
	}
}

static int sbi_genpd_probe(struct device_node *np)
{
	int ret = 0, pd_count = 0;

	if (!np)
		return -ENODEV;

	/*
	 * Parse child nodes for the "#power-domain-cells" property and
	 * initialize a genpd/genpd-of-provider pair when it's found.
	 */
	for_each_child_of_node_scoped(np, node) {
		if (!of_property_present(node, "#power-domain-cells"))
			continue;

		ret = sbi_pd_init(node);
		if (ret)
			goto remove_pd;

		pd_count++;
	}

	/* Bail out if not using the hierarchical CPU topology. */
	if (!pd_count)
		goto no_pd;

	/* Link genpd masters/subdomains to model the CPU topology. */
	ret = dt_idle_pd_init_topology(np);
	if (ret)
		goto remove_pd;

	return 0;

remove_pd:
	sbi_pd_remove();
	pr_err("failed to create CPU PM domains ret=%d\n", ret);
no_pd:
	return ret;
}

#else

static inline int sbi_genpd_probe(struct device_node *np)
{
	return 0;
}

#endif

static int sbi_cpuidle_probe(struct platform_device *pdev)
{
	int cpu, ret;
	struct cpuidle_driver *drv;
	struct cpuidle_device *dev;
	struct device_node *pds_node;

	/* Detect OSI support based on CPU DT nodes */
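	/* OSI mode requires every CPU to describe its power domains in DT. */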
	sbi_cpuidle_use_osi = true;
	for_each_possible_cpu(cpu) {
		struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
		if (np &&
		    of_property_present(np, "power-domains") &&
		    of_property_present(np, "power-domain-names")) {
			continue;
		} else {
			sbi_cpuidle_use_osi = false;
			break;
		}
	}

	/* Populate generic power domains from DT nodes */
	pds_node = of_find_node_by_path("/cpus/power-domains");
	if (pds_node) {
		ret = sbi_genpd_probe(pds_node);
		of_node_put(pds_node);
		if (ret)
			return ret;
	}

	/* Initialize CPU idle driver for each present CPU */
	for_each_present_cpu(cpu) {
		ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
		if (ret) {
			pr_debug("HART%ld: idle driver init failed\n",
				 cpuid_to_hartid_map(cpu));
			goto out_fail;
		}
	}

	/* Set up CPU hotplug notifiers */
	sbi_idle_init_cpuhp();

	if (cpuidle_disabled())
		pr_info("cpuidle is disabled\n");
	else
		pr_info("idle driver registered for all CPUs\n");

	return 0;

out_fail:
	while (--cpu >= 0) {
		dev = per_cpu(cpuidle_devices, cpu);
		drv = cpuidle_get_cpu_driver(dev);
		cpuidle_unregister(drv);
		sbi_cpuidle_deinit_cpu(cpu);
	}

	return ret;
}

static struct platform_driver sbi_cpuidle_driver = {
	.probe = sbi_cpuidle_probe,
	.driver = {
		.name = "sbi-cpuidle",
	},
};

static int __init sbi_cpuidle_init(void)
{
	int ret;
	struct platform_device *pdev;

	if (!riscv_sbi_hsm_is_supported())
		return 0;

	ret = platform_driver_register(&sbi_cpuidle_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple("sbi-cpuidle",
					       -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&sbi_cpuidle_driver);
		return PTR_ERR(pdev);
	}

	return 0;
}
arch_initcall(sbi_cpuidle_init);