Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/cpufreq/cpufreq.c
49528 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* linux/drivers/cpufreq/cpufreq.c
4
*
5
* Copyright (C) 2001 Russell King
6
* (C) 2002 - 2003 Dominik Brodowski <[email protected]>
7
* (C) 2013 Viresh Kumar <[email protected]>
8
*
9
* Oct 2005 - Ashok Raj <[email protected]>
10
* Added handling for CPU hotplug
11
* Feb 2006 - Jacob Shin <[email protected]>
12
* Fix handling for CPU hotplug -- affected CPUs
13
*/
14
15
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17
#include <linux/cpu.h>
18
#include <linux/cpufreq.h>
19
#include <linux/cpu_cooling.h>
20
#include <linux/delay.h>
21
#include <linux/device.h>
22
#include <linux/init.h>
23
#include <linux/kernel_stat.h>
24
#include <linux/module.h>
25
#include <linux/mutex.h>
26
#include <linux/pm_qos.h>
27
#include <linux/slab.h>
28
#include <linux/string_choices.h>
29
#include <linux/suspend.h>
30
#include <linux/syscore_ops.h>
31
#include <linux/tick.h>
32
#include <linux/units.h>
33
#include <trace/events/power.h>
34
35
static LIST_HEAD(cpufreq_policy_list);
36
37
/* Macros to iterate over CPU policies */
38
#define for_each_suitable_policy(__policy, __active) \
39
list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
40
if ((__active) == !policy_is_inactive(__policy))
41
42
#define for_each_active_policy(__policy) \
43
for_each_suitable_policy(__policy, true)
44
#define for_each_inactive_policy(__policy) \
45
for_each_suitable_policy(__policy, false)
46
47
/* Iterate over governors */
48
static LIST_HEAD(cpufreq_governor_list);
49
#define for_each_governor(__governor) \
50
list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
51
52
static char default_governor[CPUFREQ_NAME_LEN];
53
54
/*
55
* The "cpufreq driver" - the arch- or hardware-dependent low
56
* level driver of CPUFreq support, and its spinlock. This lock
57
* also protects the cpufreq_cpu_data array.
58
*/
59
static struct cpufreq_driver *cpufreq_driver;
60
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
61
static DEFINE_RWLOCK(cpufreq_driver_lock);
62
63
static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
64
bool cpufreq_supports_freq_invariance(void)
65
{
66
return static_branch_likely(&cpufreq_freq_invariance);
67
}
68
69
/* Flag to suspend/resume CPUFreq governors */
70
static bool cpufreq_suspended;
71
72
static inline bool has_target(void)
73
{
74
return cpufreq_driver->target_index || cpufreq_driver->target;
75
}
76
77
bool has_target_index(void)
78
{
79
return !!cpufreq_driver->target_index;
80
}
81
82
/* internal prototypes */
83
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
84
static int cpufreq_init_governor(struct cpufreq_policy *policy);
85
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
86
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
87
static int cpufreq_set_policy(struct cpufreq_policy *policy,
88
struct cpufreq_governor *new_gov,
89
unsigned int new_pol);
90
static bool cpufreq_boost_supported(void);
91
static int cpufreq_boost_trigger_state(int state);
92
93
/*
94
* Two notifier lists: the "policy" list is involved in the
95
* validation process for a new CPU frequency policy; the
96
* "transition" list for kernel code that needs to handle
97
* changes to devices when the CPU clock speed changes.
98
* The mutex locks both lists.
99
*/
100
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
101
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
102
103
static int off __read_mostly;
104
static int cpufreq_disabled(void)
105
{
106
return off;
107
}
108
void disable_cpufreq(void)
109
{
110
off = 1;
111
}
112
EXPORT_SYMBOL_GPL(disable_cpufreq);
113
114
static DEFINE_MUTEX(cpufreq_governor_mutex);
115
116
bool have_governor_per_policy(void)
117
{
118
return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
119
}
120
EXPORT_SYMBOL_GPL(have_governor_per_policy);
121
122
static struct kobject *cpufreq_global_kobject;
123
124
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
125
{
126
if (have_governor_per_policy())
127
return &policy->kobj;
128
else
129
return cpufreq_global_kobject;
130
}
131
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
132
133
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
134
{
135
struct kernel_cpustat kcpustat;
136
u64 cur_wall_time;
137
u64 idle_time;
138
u64 busy_time;
139
140
cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
141
142
kcpustat_cpu_fetch(&kcpustat, cpu);
143
144
busy_time = kcpustat.cpustat[CPUTIME_USER];
145
busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
146
busy_time += kcpustat.cpustat[CPUTIME_IRQ];
147
busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
148
busy_time += kcpustat.cpustat[CPUTIME_STEAL];
149
busy_time += kcpustat.cpustat[CPUTIME_NICE];
150
151
idle_time = cur_wall_time - busy_time;
152
if (wall)
153
*wall = div_u64(cur_wall_time, NSEC_PER_USEC);
154
155
return div_u64(idle_time, NSEC_PER_USEC);
156
}
157
158
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
159
{
160
u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
161
162
if (idle_time == -1ULL)
163
return get_cpu_idle_time_jiffy(cpu, wall);
164
else if (!io_busy)
165
idle_time += get_cpu_iowait_time_us(cpu, wall);
166
167
return idle_time;
168
}
169
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
170
171
/*
172
* This is a generic cpufreq init() routine which can be used by cpufreq
173
* drivers of SMP systems. It will do following:
174
* - validate & show freq table passed
175
* - set policies transition latency
176
* - policy->cpus with all possible CPUs
177
*/
178
void cpufreq_generic_init(struct cpufreq_policy *policy,
179
struct cpufreq_frequency_table *table,
180
unsigned int transition_latency)
181
{
182
policy->freq_table = table;
183
policy->cpuinfo.transition_latency = transition_latency;
184
185
/*
186
* The driver only supports the SMP configuration where all processors
187
* share the clock and voltage and clock.
188
*/
189
cpumask_setall(policy->cpus);
190
}
191
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
192
193
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
194
{
195
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
196
197
return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
198
}
199
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
200
201
unsigned int cpufreq_generic_get(unsigned int cpu)
202
{
203
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
204
205
if (!policy || IS_ERR(policy->clk)) {
206
pr_err("%s: No %s associated to cpu: %d\n",
207
__func__, policy ? "clk" : "policy", cpu);
208
return 0;
209
}
210
211
return clk_get_rate(policy->clk) / 1000;
212
}
213
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
214
215
/**
216
* cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
217
* @cpu: CPU to find the policy for.
218
*
219
* Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
220
* the kobject reference counter of that policy. Return a valid policy on
221
* success or NULL on failure.
222
*
223
* The policy returned by this function has to be released with the help of
224
* cpufreq_cpu_put() to balance its kobject reference counter properly.
225
*/
226
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
227
{
228
struct cpufreq_policy *policy = NULL;
229
unsigned long flags;
230
231
if (WARN_ON(cpu >= nr_cpu_ids))
232
return NULL;
233
234
/* get the cpufreq driver */
235
read_lock_irqsave(&cpufreq_driver_lock, flags);
236
237
if (cpufreq_driver) {
238
/* get the CPU */
239
policy = cpufreq_cpu_get_raw(cpu);
240
if (policy)
241
kobject_get(&policy->kobj);
242
}
243
244
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
245
246
return policy;
247
}
248
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
249
250
/**
251
* cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
252
* @policy: cpufreq policy returned by cpufreq_cpu_get().
253
*/
254
void cpufreq_cpu_put(struct cpufreq_policy *policy)
255
{
256
kobject_put(&policy->kobj);
257
}
258
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
259
260
/*********************************************************************
261
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
262
*********************************************************************/
263
264
/**
265
* adjust_jiffies - Adjust the system "loops_per_jiffy".
266
* @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
267
* @ci: Frequency change information.
268
*
269
* This function alters the system "loops_per_jiffy" for the clock
270
* speed change. Note that loops_per_jiffy cannot be updated on SMP
271
* systems as each CPU might be scaled differently. So, use the arch
272
* per-CPU loops_per_jiffy value wherever possible.
273
*/
274
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
275
{
276
#ifndef CONFIG_SMP
277
static unsigned long l_p_j_ref;
278
static unsigned int l_p_j_ref_freq;
279
280
if (ci->flags & CPUFREQ_CONST_LOOPS)
281
return;
282
283
if (!l_p_j_ref_freq) {
284
l_p_j_ref = loops_per_jiffy;
285
l_p_j_ref_freq = ci->old;
286
pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
287
l_p_j_ref, l_p_j_ref_freq);
288
}
289
if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
290
loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
291
ci->new);
292
pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
293
loops_per_jiffy, ci->new);
294
}
295
#endif
296
}
297
298
/**
299
* cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
300
* @policy: cpufreq policy to enable fast frequency switching for.
301
* @freqs: contain details of the frequency update.
302
* @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
303
*
304
* This function calls the transition notifiers and adjust_jiffies().
305
*
306
* It is called twice on all CPU frequency changes that have external effects.
307
*/
308
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
309
struct cpufreq_freqs *freqs,
310
unsigned int state)
311
{
312
int cpu;
313
314
BUG_ON(irqs_disabled());
315
316
if (cpufreq_disabled())
317
return;
318
319
freqs->policy = policy;
320
freqs->flags = cpufreq_driver->flags;
321
pr_debug("notification %u of frequency transition to %u kHz\n",
322
state, freqs->new);
323
324
switch (state) {
325
case CPUFREQ_PRECHANGE:
326
/*
327
* Detect if the driver reported a value as "old frequency"
328
* which is not equal to what the cpufreq core thinks is
329
* "old frequency".
330
*/
331
if (policy->cur && policy->cur != freqs->old) {
332
pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
333
freqs->old, policy->cur);
334
freqs->old = policy->cur;
335
}
336
337
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
338
CPUFREQ_PRECHANGE, freqs);
339
340
adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
341
break;
342
343
case CPUFREQ_POSTCHANGE:
344
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
345
pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
346
cpumask_pr_args(policy->cpus));
347
348
for_each_cpu(cpu, policy->cpus)
349
trace_cpu_frequency(freqs->new, cpu);
350
351
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
352
CPUFREQ_POSTCHANGE, freqs);
353
354
cpufreq_stats_record_transition(policy, freqs->new);
355
policy->cur = freqs->new;
356
}
357
}
358
359
/* Do post notifications when there are chances that transition has failed */
360
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
361
struct cpufreq_freqs *freqs, int transition_failed)
362
{
363
cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
364
if (!transition_failed)
365
return;
366
367
swap(freqs->old, freqs->new);
368
cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
369
cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
370
}
371
372
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
373
struct cpufreq_freqs *freqs)
374
{
375
376
/*
377
* Catch double invocations of _begin() which lead to self-deadlock.
378
* ASYNC_NOTIFICATION drivers are left out because the cpufreq core
379
* doesn't invoke _begin() on their behalf, and hence the chances of
380
* double invocations are very low. Moreover, there are scenarios
381
* where these checks can emit false-positive warnings in these
382
* drivers; so we avoid that by skipping them altogether.
383
*/
384
WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
385
&& current == policy->transition_task);
386
387
wait:
388
wait_event(policy->transition_wait, !policy->transition_ongoing);
389
390
spin_lock(&policy->transition_lock);
391
392
if (unlikely(policy->transition_ongoing)) {
393
spin_unlock(&policy->transition_lock);
394
goto wait;
395
}
396
397
policy->transition_ongoing = true;
398
policy->transition_task = current;
399
400
spin_unlock(&policy->transition_lock);
401
402
cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
403
}
404
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
405
406
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
407
struct cpufreq_freqs *freqs, int transition_failed)
408
{
409
if (WARN_ON(!policy->transition_ongoing))
410
return;
411
412
cpufreq_notify_post_transition(policy, freqs, transition_failed);
413
414
arch_set_freq_scale(policy->related_cpus,
415
policy->cur,
416
arch_scale_freq_ref(policy->cpu));
417
418
spin_lock(&policy->transition_lock);
419
policy->transition_ongoing = false;
420
policy->transition_task = NULL;
421
spin_unlock(&policy->transition_lock);
422
423
wake_up(&policy->transition_wait);
424
}
425
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
426
427
/*
428
* Fast frequency switching status count. Positive means "enabled", negative
429
* means "disabled" and 0 means "not decided yet".
430
*/
431
static int cpufreq_fast_switch_count;
432
static DEFINE_MUTEX(cpufreq_fast_switch_lock);
433
434
static void cpufreq_list_transition_notifiers(void)
435
{
436
struct notifier_block *nb;
437
438
pr_info("Registered transition notifiers:\n");
439
440
mutex_lock(&cpufreq_transition_notifier_list.mutex);
441
442
for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
443
pr_info("%pS\n", nb->notifier_call);
444
445
mutex_unlock(&cpufreq_transition_notifier_list.mutex);
446
}
447
448
/**
449
* cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
450
* @policy: cpufreq policy to enable fast frequency switching for.
451
*
452
* Try to enable fast frequency switching for @policy.
453
*
454
* The attempt will fail if there is at least one transition notifier registered
455
* at this point, as fast frequency switching is quite fundamentally at odds
456
* with transition notifiers. Thus if successful, it will make registration of
457
* transition notifiers fail going forward.
458
*/
459
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
460
{
461
lockdep_assert_held(&policy->rwsem);
462
463
if (!policy->fast_switch_possible)
464
return;
465
466
mutex_lock(&cpufreq_fast_switch_lock);
467
if (cpufreq_fast_switch_count >= 0) {
468
cpufreq_fast_switch_count++;
469
policy->fast_switch_enabled = true;
470
} else {
471
pr_warn("CPU%u: Fast frequency switching not enabled\n",
472
policy->cpu);
473
cpufreq_list_transition_notifiers();
474
}
475
mutex_unlock(&cpufreq_fast_switch_lock);
476
}
477
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
478
479
/**
480
* cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
481
* @policy: cpufreq policy to disable fast frequency switching for.
482
*/
483
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
484
{
485
mutex_lock(&cpufreq_fast_switch_lock);
486
if (policy->fast_switch_enabled) {
487
policy->fast_switch_enabled = false;
488
if (!WARN_ON(cpufreq_fast_switch_count <= 0))
489
cpufreq_fast_switch_count--;
490
}
491
mutex_unlock(&cpufreq_fast_switch_lock);
492
}
493
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
494
495
static unsigned int __resolve_freq(struct cpufreq_policy *policy,
496
unsigned int target_freq,
497
unsigned int min, unsigned int max,
498
unsigned int relation)
499
{
500
unsigned int idx;
501
502
target_freq = clamp_val(target_freq, min, max);
503
504
if (!policy->freq_table)
505
return target_freq;
506
507
idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
508
policy->cached_resolved_idx = idx;
509
policy->cached_target_freq = target_freq;
510
return policy->freq_table[idx].frequency;
511
}
512
513
/**
514
* cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
515
* one.
516
* @policy: associated policy to interrogate
517
* @target_freq: target frequency to resolve.
518
*
519
* The target to driver frequency mapping is cached in the policy.
520
*
521
* Return: Lowest driver-supported frequency greater than or equal to the
522
* given target_freq, subject to policy (min/max) and driver limitations.
523
*/
524
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
525
unsigned int target_freq)
526
{
527
unsigned int min = READ_ONCE(policy->min);
528
unsigned int max = READ_ONCE(policy->max);
529
530
/*
531
* If this function runs in parallel with cpufreq_set_policy(), it may
532
* read policy->min before the update and policy->max after the update
533
* or the other way around, so there is no ordering guarantee.
534
*
535
* Resolve this by always honoring the max (in case it comes from
536
* thermal throttling or similar).
537
*/
538
if (unlikely(min > max))
539
min = max;
540
541
return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
542
}
543
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
544
545
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
546
{
547
unsigned int latency;
548
549
if (policy->transition_delay_us)
550
return policy->transition_delay_us;
551
552
latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
553
if (latency)
554
/* Give a 50% breathing room between updates */
555
return latency + (latency >> 1);
556
557
return USEC_PER_MSEC;
558
}
559
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
560
561
/*********************************************************************
562
* SYSFS INTERFACE *
563
*********************************************************************/
564
static ssize_t show_boost(struct kobject *kobj,
565
struct kobj_attribute *attr, char *buf)
566
{
567
return sysfs_emit(buf, "%d\n", cpufreq_driver->boost_enabled);
568
}
569
570
static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
571
const char *buf, size_t count)
572
{
573
bool enable;
574
575
if (kstrtobool(buf, &enable))
576
return -EINVAL;
577
578
if (cpufreq_boost_trigger_state(enable)) {
579
pr_err("%s: Cannot %s BOOST!\n",
580
__func__, str_enable_disable(enable));
581
return -EINVAL;
582
}
583
584
pr_debug("%s: cpufreq BOOST %s\n",
585
__func__, str_enabled_disabled(enable));
586
587
return count;
588
}
589
define_one_global_rw(boost);
590
591
static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
592
{
593
return sysfs_emit(buf, "%d\n", policy->boost_enabled);
594
}
595
596
static int policy_set_boost(struct cpufreq_policy *policy, bool enable)
597
{
598
int ret;
599
600
if (policy->boost_enabled == enable)
601
return 0;
602
603
policy->boost_enabled = enable;
604
605
ret = cpufreq_driver->set_boost(policy, enable);
606
if (ret)
607
policy->boost_enabled = !policy->boost_enabled;
608
609
return ret;
610
}
611
612
static ssize_t store_local_boost(struct cpufreq_policy *policy,
613
const char *buf, size_t count)
614
{
615
int ret;
616
bool enable;
617
618
if (kstrtobool(buf, &enable))
619
return -EINVAL;
620
621
if (!cpufreq_driver->boost_enabled)
622
return -EINVAL;
623
624
if (!policy->boost_supported)
625
return -EINVAL;
626
627
ret = policy_set_boost(policy, enable);
628
if (!ret)
629
return count;
630
631
return ret;
632
}
633
634
static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);
635
636
static struct cpufreq_governor *find_governor(const char *str_governor)
637
{
638
struct cpufreq_governor *t;
639
640
for_each_governor(t)
641
if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
642
return t;
643
644
return NULL;
645
}
646
647
static struct cpufreq_governor *get_governor(const char *str_governor)
648
{
649
struct cpufreq_governor *t;
650
651
mutex_lock(&cpufreq_governor_mutex);
652
t = find_governor(str_governor);
653
if (!t)
654
goto unlock;
655
656
if (!try_module_get(t->owner))
657
t = NULL;
658
659
unlock:
660
mutex_unlock(&cpufreq_governor_mutex);
661
662
return t;
663
}
664
665
static unsigned int cpufreq_parse_policy(char *str_governor)
666
{
667
if (!strncasecmp(str_governor, "performance", strlen("performance")))
668
return CPUFREQ_POLICY_PERFORMANCE;
669
670
if (!strncasecmp(str_governor, "powersave", strlen("powersave")))
671
return CPUFREQ_POLICY_POWERSAVE;
672
673
return CPUFREQ_POLICY_UNKNOWN;
674
}
675
676
/**
677
* cpufreq_parse_governor - parse a governor string only for has_target()
678
* @str_governor: Governor name.
679
*/
680
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
681
{
682
struct cpufreq_governor *t;
683
684
t = get_governor(str_governor);
685
if (t)
686
return t;
687
688
if (request_module("cpufreq_%s", str_governor))
689
return NULL;
690
691
return get_governor(str_governor);
692
}
693
694
/*
695
* cpufreq_per_cpu_attr_read() / show_##file_name() -
696
* print out cpufreq information
697
*
698
* Write out information from cpufreq_driver->policy[cpu]; object must be
699
* "unsigned int".
700
*/
701
702
#define show_one(file_name, object) \
703
static ssize_t show_##file_name \
704
(struct cpufreq_policy *policy, char *buf) \
705
{ \
706
return sysfs_emit(buf, "%u\n", policy->object); \
707
}
708
709
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
710
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
711
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
712
show_one(scaling_min_freq, min);
713
show_one(scaling_max_freq, max);
714
715
__weak int arch_freq_get_on_cpu(int cpu)
716
{
717
return -EOPNOTSUPP;
718
}
719
720
static inline bool cpufreq_avg_freq_supported(struct cpufreq_policy *policy)
721
{
722
return arch_freq_get_on_cpu(policy->cpu) != -EOPNOTSUPP;
723
}
724
725
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
726
{
727
ssize_t ret;
728
int freq;
729
730
freq = IS_ENABLED(CONFIG_CPUFREQ_ARCH_CUR_FREQ)
731
? arch_freq_get_on_cpu(policy->cpu)
732
: 0;
733
734
if (freq > 0)
735
ret = sysfs_emit(buf, "%u\n", freq);
736
else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
737
ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
738
else
739
ret = sysfs_emit(buf, "%u\n", policy->cur);
740
return ret;
741
}
742
743
/*
744
* cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
745
*/
746
#define store_one(file_name, object) \
747
static ssize_t store_##file_name \
748
(struct cpufreq_policy *policy, const char *buf, size_t count) \
749
{ \
750
unsigned long val; \
751
int ret; \
752
\
753
ret = kstrtoul(buf, 0, &val); \
754
if (ret) \
755
return ret; \
756
\
757
ret = freq_qos_update_request(policy->object##_freq_req, val);\
758
return ret >= 0 ? count : ret; \
759
}
760
761
store_one(scaling_min_freq, min);
762
store_one(scaling_max_freq, max);
763
764
/*
765
* show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
766
*/
767
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
768
char *buf)
769
{
770
unsigned int cur_freq = __cpufreq_get(policy);
771
772
if (cur_freq)
773
return sysfs_emit(buf, "%u\n", cur_freq);
774
775
return sysfs_emit(buf, "<unknown>\n");
776
}
777
778
/*
779
* show_cpuinfo_avg_freq - average CPU frequency as detected by hardware
780
*/
781
static ssize_t show_cpuinfo_avg_freq(struct cpufreq_policy *policy,
782
char *buf)
783
{
784
int avg_freq = arch_freq_get_on_cpu(policy->cpu);
785
786
if (avg_freq > 0)
787
return sysfs_emit(buf, "%u\n", avg_freq);
788
return avg_freq != 0 ? avg_freq : -EINVAL;
789
}
790
791
/*
792
* show_scaling_governor - show the current policy for the specified CPU
793
*/
794
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
795
{
796
if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
797
return sysfs_emit(buf, "powersave\n");
798
else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
799
return sysfs_emit(buf, "performance\n");
800
else if (policy->governor)
801
return sysfs_emit(buf, "%s\n", policy->governor->name);
802
return -EINVAL;
803
}
804
805
/*
806
* store_scaling_governor - store policy for the specified CPU
807
*/
808
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
809
const char *buf, size_t count)
810
{
811
char str_governor[CPUFREQ_NAME_LEN];
812
int ret;
813
814
ret = sscanf(buf, "%15s", str_governor);
815
if (ret != 1)
816
return -EINVAL;
817
818
if (cpufreq_driver->setpolicy) {
819
unsigned int new_pol;
820
821
new_pol = cpufreq_parse_policy(str_governor);
822
if (!new_pol)
823
return -EINVAL;
824
825
ret = cpufreq_set_policy(policy, NULL, new_pol);
826
} else {
827
struct cpufreq_governor *new_gov;
828
829
new_gov = cpufreq_parse_governor(str_governor);
830
if (!new_gov)
831
return -EINVAL;
832
833
ret = cpufreq_set_policy(policy, new_gov,
834
CPUFREQ_POLICY_UNKNOWN);
835
836
module_put(new_gov->owner);
837
}
838
839
return ret ? ret : count;
840
}
841
842
/*
843
* show_scaling_driver - show the cpufreq driver currently loaded
844
*/
845
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
846
{
847
return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
848
}
849
850
/*
851
* show_scaling_available_governors - show the available CPUfreq governors
852
*/
853
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
854
char *buf)
855
{
856
ssize_t i = 0;
857
struct cpufreq_governor *t;
858
859
if (!has_target()) {
860
i += sysfs_emit(buf, "performance powersave");
861
goto out;
862
}
863
864
mutex_lock(&cpufreq_governor_mutex);
865
for_each_governor(t) {
866
if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
867
- (CPUFREQ_NAME_LEN + 2)))
868
break;
869
i += sysfs_emit_at(buf, i, "%s ", t->name);
870
}
871
mutex_unlock(&cpufreq_governor_mutex);
872
out:
873
i += sysfs_emit_at(buf, i, "\n");
874
return i;
875
}
876
877
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
878
{
879
ssize_t i = 0;
880
unsigned int cpu;
881
882
for_each_cpu(cpu, mask) {
883
i += sysfs_emit_at(buf, i, "%u ", cpu);
884
if (i >= (PAGE_SIZE - 5))
885
break;
886
}
887
888
/* Remove the extra space at the end */
889
i--;
890
891
i += sysfs_emit_at(buf, i, "\n");
892
return i;
893
}
894
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
895
896
/*
897
* show_related_cpus - show the CPUs affected by each transition even if
898
* hw coordination is in use
899
*/
900
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
901
{
902
return cpufreq_show_cpus(policy->related_cpus, buf);
903
}
904
905
/*
906
* show_affected_cpus - show the CPUs affected by each transition
907
*/
908
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
909
{
910
return cpufreq_show_cpus(policy->cpus, buf);
911
}
912
913
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
914
const char *buf, size_t count)
915
{
916
unsigned int freq = 0;
917
int ret;
918
919
if (!policy->governor || !policy->governor->store_setspeed)
920
return -EINVAL;
921
922
ret = kstrtouint(buf, 0, &freq);
923
if (ret)
924
return ret;
925
926
policy->governor->store_setspeed(policy, freq);
927
928
return count;
929
}
930
931
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
932
{
933
if (!policy->governor || !policy->governor->show_setspeed)
934
return sysfs_emit(buf, "<unsupported>\n");
935
936
return policy->governor->show_setspeed(policy, buf);
937
}
938
939
/*
940
* show_bios_limit - show the current cpufreq HW/BIOS limitation
941
*/
942
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
943
{
944
unsigned int limit;
945
int ret;
946
ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
947
if (!ret)
948
return sysfs_emit(buf, "%u\n", limit);
949
return sysfs_emit(buf, "%u\n", policy->cpuinfo.max_freq);
950
}
951
952
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
953
cpufreq_freq_attr_ro(cpuinfo_avg_freq);
954
cpufreq_freq_attr_ro(cpuinfo_min_freq);
955
cpufreq_freq_attr_ro(cpuinfo_max_freq);
956
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
957
cpufreq_freq_attr_ro(scaling_available_governors);
958
cpufreq_freq_attr_ro(scaling_driver);
959
cpufreq_freq_attr_ro(scaling_cur_freq);
960
cpufreq_freq_attr_ro(bios_limit);
961
cpufreq_freq_attr_ro(related_cpus);
962
cpufreq_freq_attr_ro(affected_cpus);
963
cpufreq_freq_attr_rw(scaling_min_freq);
964
cpufreq_freq_attr_rw(scaling_max_freq);
965
cpufreq_freq_attr_rw(scaling_governor);
966
cpufreq_freq_attr_rw(scaling_setspeed);
967
968
static struct attribute *cpufreq_attrs[] = {
969
&cpuinfo_min_freq.attr,
970
&cpuinfo_max_freq.attr,
971
&cpuinfo_transition_latency.attr,
972
&scaling_cur_freq.attr,
973
&scaling_min_freq.attr,
974
&scaling_max_freq.attr,
975
&affected_cpus.attr,
976
&related_cpus.attr,
977
&scaling_governor.attr,
978
&scaling_driver.attr,
979
&scaling_available_governors.attr,
980
&scaling_setspeed.attr,
981
NULL
982
};
983
ATTRIBUTE_GROUPS(cpufreq);
984
985
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
986
#define to_attr(a) container_of(a, struct freq_attr, attr)
987
988
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
989
{
990
struct cpufreq_policy *policy = to_policy(kobj);
991
struct freq_attr *fattr = to_attr(attr);
992
993
if (!fattr->show)
994
return -EIO;
995
996
guard(cpufreq_policy_read)(policy);
997
998
if (likely(!policy_is_inactive(policy)))
999
return fattr->show(policy, buf);
1000
1001
return -EBUSY;
1002
}
1003
1004
static ssize_t store(struct kobject *kobj, struct attribute *attr,
1005
const char *buf, size_t count)
1006
{
1007
struct cpufreq_policy *policy = to_policy(kobj);
1008
struct freq_attr *fattr = to_attr(attr);
1009
1010
if (!fattr->store)
1011
return -EIO;
1012
1013
guard(cpufreq_policy_write)(policy);
1014
1015
if (likely(!policy_is_inactive(policy)))
1016
return fattr->store(policy, buf, count);
1017
1018
return -EBUSY;
1019
}
1020
1021
static void cpufreq_sysfs_release(struct kobject *kobj)
1022
{
1023
struct cpufreq_policy *policy = to_policy(kobj);
1024
pr_debug("last reference is dropped\n");
1025
complete(&policy->kobj_unregister);
1026
}
1027
1028
static const struct sysfs_ops sysfs_ops = {
1029
.show = show,
1030
.store = store,
1031
};
1032
1033
static const struct kobj_type ktype_cpufreq = {
1034
.sysfs_ops = &sysfs_ops,
1035
.default_groups = cpufreq_groups,
1036
.release = cpufreq_sysfs_release,
1037
};
1038
1039
static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
1040
struct device *dev)
1041
{
1042
if (unlikely(!dev))
1043
return;
1044
1045
if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
1046
return;
1047
1048
dev_dbg(dev, "%s: Adding symlink\n", __func__);
1049
if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
1050
dev_err(dev, "cpufreq symlink creation failed\n");
1051
}
1052
1053
static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu,
1054
struct device *dev)
1055
{
1056
dev_dbg(dev, "%s: Removing symlink\n", __func__);
1057
sysfs_remove_link(&dev->kobj, "cpufreq");
1058
cpumask_clear_cpu(cpu, policy->real_cpus);
1059
}
1060
1061
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1062
{
1063
struct freq_attr **drv_attr;
1064
int ret = 0;
1065
1066
/* Attributes that need freq_table */
1067
if (policy->freq_table) {
1068
ret = sysfs_create_file(&policy->kobj,
1069
&cpufreq_freq_attr_scaling_available_freqs.attr);
1070
if (ret)
1071
return ret;
1072
1073
if (cpufreq_boost_supported()) {
1074
ret = sysfs_create_file(&policy->kobj,
1075
&cpufreq_freq_attr_scaling_boost_freqs.attr);
1076
if (ret)
1077
return ret;
1078
}
1079
}
1080
1081
/* set up files for this cpu device */
1082
drv_attr = cpufreq_driver->attr;
1083
while (drv_attr && *drv_attr) {
1084
ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1085
if (ret)
1086
return ret;
1087
drv_attr++;
1088
}
1089
if (cpufreq_driver->get) {
1090
ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1091
if (ret)
1092
return ret;
1093
}
1094
1095
if (cpufreq_avg_freq_supported(policy)) {
1096
ret = sysfs_create_file(&policy->kobj, &cpuinfo_avg_freq.attr);
1097
if (ret)
1098
return ret;
1099
}
1100
1101
if (cpufreq_driver->bios_limit) {
1102
ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1103
if (ret)
1104
return ret;
1105
}
1106
1107
if (cpufreq_boost_supported()) {
1108
ret = sysfs_create_file(&policy->kobj, &local_boost.attr);
1109
if (ret)
1110
return ret;
1111
}
1112
1113
return 0;
1114
}
1115
1116
static int cpufreq_init_policy(struct cpufreq_policy *policy)
1117
{
1118
struct cpufreq_governor *gov = NULL;
1119
unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
1120
int ret;
1121
1122
if (has_target()) {
1123
/* Update policy governor to the one used before hotplug. */
1124
if (policy->last_governor[0] != '\0')
1125
gov = get_governor(policy->last_governor);
1126
if (gov) {
1127
pr_debug("Restoring governor %s for cpu %d\n",
1128
gov->name, policy->cpu);
1129
} else {
1130
gov = get_governor(default_governor);
1131
}
1132
1133
if (!gov) {
1134
gov = cpufreq_default_governor();
1135
__module_get(gov->owner);
1136
}
1137
1138
} else {
1139
1140
/* Use the default policy if there is no last_policy. */
1141
if (policy->last_policy) {
1142
pol = policy->last_policy;
1143
} else {
1144
pol = cpufreq_parse_policy(default_governor);
1145
/*
1146
* In case the default governor is neither "performance"
1147
* nor "powersave", fall back to the initial policy
1148
* value set by the driver.
1149
*/
1150
if (pol == CPUFREQ_POLICY_UNKNOWN)
1151
pol = policy->policy;
1152
}
1153
if (pol != CPUFREQ_POLICY_PERFORMANCE &&
1154
pol != CPUFREQ_POLICY_POWERSAVE)
1155
return -ENODATA;
1156
}
1157
1158
ret = cpufreq_set_policy(policy, gov, pol);
1159
if (gov)
1160
module_put(gov->owner);
1161
1162
return ret;
1163
}
1164
1165
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1166
{
1167
int ret = 0;
1168
1169
/* Has this CPU been taken care of already? */
1170
if (cpumask_test_cpu(cpu, policy->cpus))
1171
return 0;
1172
1173
guard(cpufreq_policy_write)(policy);
1174
1175
if (has_target())
1176
cpufreq_stop_governor(policy);
1177
1178
cpumask_set_cpu(cpu, policy->cpus);
1179
1180
if (has_target()) {
1181
ret = cpufreq_start_governor(policy);
1182
if (ret)
1183
pr_err("%s: Failed to start governor\n", __func__);
1184
}
1185
1186
return ret;
1187
}
1188
1189
void refresh_frequency_limits(struct cpufreq_policy *policy)
1190
{
1191
if (!policy_is_inactive(policy)) {
1192
pr_debug("updating policy for CPU %u\n", policy->cpu);
1193
1194
cpufreq_set_policy(policy, policy->governor, policy->policy);
1195
}
1196
}
1197
EXPORT_SYMBOL(refresh_frequency_limits);
1198
1199
static void handle_update(struct work_struct *work)
1200
{
1201
struct cpufreq_policy *policy =
1202
container_of(work, struct cpufreq_policy, update);
1203
1204
pr_debug("handle_update for cpu %u called\n", policy->cpu);
1205
1206
guard(cpufreq_policy_write)(policy);
1207
1208
refresh_frequency_limits(policy);
1209
}
1210
1211
static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
1212
void *data)
1213
{
1214
struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
1215
1216
schedule_work(&policy->update);
1217
return 0;
1218
}
1219
1220
static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
1221
void *data)
1222
{
1223
struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
1224
1225
schedule_work(&policy->update);
1226
return 0;
1227
}
1228
1229
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1230
{
1231
struct kobject *kobj;
1232
struct completion *cmp;
1233
1234
scoped_guard(cpufreq_policy_write, policy) {
1235
cpufreq_stats_free_table(policy);
1236
kobj = &policy->kobj;
1237
cmp = &policy->kobj_unregister;
1238
}
1239
kobject_put(kobj);
1240
1241
/*
1242
* We need to make sure that the underlying kobj is
1243
* actually not referenced anymore by anybody before we
1244
* proceed with unloading.
1245
*/
1246
pr_debug("waiting for dropping of refcount\n");
1247
wait_for_completion(cmp);
1248
pr_debug("wait complete\n");
1249
}
1250
1251
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1252
{
1253
struct cpufreq_policy *policy;
1254
struct device *dev = get_cpu_device(cpu);
1255
int ret;
1256
1257
if (!dev)
1258
return NULL;
1259
1260
policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1261
if (!policy)
1262
return NULL;
1263
1264
if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1265
goto err_free_policy;
1266
1267
if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1268
goto err_free_cpumask;
1269
1270
if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1271
goto err_free_rcpumask;
1272
1273
init_completion(&policy->kobj_unregister);
1274
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1275
cpufreq_global_kobject, "policy%u", cpu);
1276
if (ret) {
1277
dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
1278
/*
1279
* The entire policy object will be freed below, but the extra
1280
* memory allocated for the kobject name needs to be freed by
1281
* releasing the kobject.
1282
*/
1283
kobject_put(&policy->kobj);
1284
goto err_free_real_cpus;
1285
}
1286
1287
init_rwsem(&policy->rwsem);
1288
1289
freq_constraints_init(&policy->constraints);
1290
1291
policy->nb_min.notifier_call = cpufreq_notifier_min;
1292
policy->nb_max.notifier_call = cpufreq_notifier_max;
1293
1294
ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
1295
&policy->nb_min);
1296
if (ret) {
1297
dev_err(dev, "Failed to register MIN QoS notifier: %d (CPU%u)\n",
1298
ret, cpu);
1299
goto err_kobj_remove;
1300
}
1301
1302
ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
1303
&policy->nb_max);
1304
if (ret) {
1305
dev_err(dev, "Failed to register MAX QoS notifier: %d (CPU%u)\n",
1306
ret, cpu);
1307
goto err_min_qos_notifier;
1308
}
1309
1310
INIT_LIST_HEAD(&policy->policy_list);
1311
spin_lock_init(&policy->transition_lock);
1312
init_waitqueue_head(&policy->transition_wait);
1313
INIT_WORK(&policy->update, handle_update);
1314
1315
return policy;
1316
1317
err_min_qos_notifier:
1318
freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1319
&policy->nb_min);
1320
err_kobj_remove:
1321
cpufreq_policy_put_kobj(policy);
1322
err_free_real_cpus:
1323
free_cpumask_var(policy->real_cpus);
1324
err_free_rcpumask:
1325
free_cpumask_var(policy->related_cpus);
1326
err_free_cpumask:
1327
free_cpumask_var(policy->cpus);
1328
err_free_policy:
1329
kfree(policy);
1330
1331
return NULL;
1332
}
1333
1334
static void cpufreq_policy_free(struct cpufreq_policy *policy)
1335
{
1336
unsigned long flags;
1337
int cpu;
1338
1339
/*
1340
* The callers must ensure the policy is inactive by now, to avoid any
1341
* races with show()/store() callbacks.
1342
*/
1343
if (unlikely(!policy_is_inactive(policy)))
1344
pr_warn("%s: Freeing active policy\n", __func__);
1345
1346
/* Remove policy from list */
1347
write_lock_irqsave(&cpufreq_driver_lock, flags);
1348
list_del(&policy->policy_list);
1349
1350
for_each_cpu(cpu, policy->related_cpus)
1351
per_cpu(cpufreq_cpu_data, cpu) = NULL;
1352
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1353
1354
freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
1355
&policy->nb_max);
1356
freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1357
&policy->nb_min);
1358
1359
/* Cancel any pending policy->update work before freeing the policy. */
1360
cancel_work_sync(&policy->update);
1361
1362
if (policy->max_freq_req) {
1363
/*
1364
* Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
1365
* notification, since CPUFREQ_CREATE_POLICY notification was
1366
* sent after adding max_freq_req earlier.
1367
*/
1368
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1369
CPUFREQ_REMOVE_POLICY, policy);
1370
freq_qos_remove_request(policy->max_freq_req);
1371
}
1372
1373
freq_qos_remove_request(policy->min_freq_req);
1374
kfree(policy->min_freq_req);
1375
1376
cpufreq_policy_put_kobj(policy);
1377
free_cpumask_var(policy->real_cpus);
1378
free_cpumask_var(policy->related_cpus);
1379
free_cpumask_var(policy->cpus);
1380
kfree(policy);
1381
}
1382
1383
static int cpufreq_policy_online(struct cpufreq_policy *policy,
1384
unsigned int cpu, bool new_policy)
1385
{
1386
unsigned long flags;
1387
unsigned int j;
1388
int ret;
1389
1390
guard(cpufreq_policy_write)(policy);
1391
1392
policy->cpu = cpu;
1393
policy->governor = NULL;
1394
1395
if (!new_policy && cpufreq_driver->online) {
1396
/* Recover policy->cpus using related_cpus */
1397
cpumask_copy(policy->cpus, policy->related_cpus);
1398
1399
ret = cpufreq_driver->online(policy);
1400
if (ret) {
1401
pr_debug("%s: %d: initialization failed\n", __func__,
1402
__LINE__);
1403
goto out_exit_policy;
1404
}
1405
} else {
1406
cpumask_copy(policy->cpus, cpumask_of(cpu));
1407
1408
/*
1409
* Call driver. From then on the cpufreq must be able
1410
* to accept all calls to ->verify and ->setpolicy for this CPU.
1411
*/
1412
ret = cpufreq_driver->init(policy);
1413
if (ret) {
1414
pr_debug("%s: %d: initialization failed\n", __func__,
1415
__LINE__);
1416
goto out_clear_policy;
1417
}
1418
1419
/*
1420
* The initialization has succeeded and the policy is online.
1421
* If there is a problem with its frequency table, take it
1422
* offline and drop it.
1423
*/
1424
if (policy->freq_table_sorted != CPUFREQ_TABLE_SORTED_ASCENDING &&
1425
policy->freq_table_sorted != CPUFREQ_TABLE_SORTED_DESCENDING) {
1426
ret = cpufreq_table_validate_and_sort(policy);
1427
if (ret)
1428
goto out_offline_policy;
1429
}
1430
1431
/* related_cpus should at least include policy->cpus. */
1432
cpumask_copy(policy->related_cpus, policy->cpus);
1433
}
1434
1435
/*
1436
* affected cpus must always be the one, which are online. We aren't
1437
* managing offline cpus here.
1438
*/
1439
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1440
1441
if (new_policy) {
1442
for_each_cpu(j, policy->related_cpus) {
1443
per_cpu(cpufreq_cpu_data, j) = policy;
1444
add_cpu_dev_symlink(policy, j, get_cpu_device(j));
1445
}
1446
1447
policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
1448
GFP_KERNEL);
1449
if (!policy->min_freq_req) {
1450
ret = -ENOMEM;
1451
goto out_destroy_policy;
1452
}
1453
1454
ret = freq_qos_add_request(&policy->constraints,
1455
policy->min_freq_req, FREQ_QOS_MIN,
1456
FREQ_QOS_MIN_DEFAULT_VALUE);
1457
if (ret < 0) {
1458
/*
1459
* So we don't call freq_qos_remove_request() for an
1460
* uninitialized request.
1461
*/
1462
kfree(policy->min_freq_req);
1463
policy->min_freq_req = NULL;
1464
goto out_destroy_policy;
1465
}
1466
1467
/*
1468
* This must be initialized right here to avoid calling
1469
* freq_qos_remove_request() on uninitialized request in case
1470
* of errors.
1471
*/
1472
policy->max_freq_req = policy->min_freq_req + 1;
1473
1474
ret = freq_qos_add_request(&policy->constraints,
1475
policy->max_freq_req, FREQ_QOS_MAX,
1476
FREQ_QOS_MAX_DEFAULT_VALUE);
1477
if (ret < 0) {
1478
policy->max_freq_req = NULL;
1479
goto out_destroy_policy;
1480
}
1481
1482
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1483
CPUFREQ_CREATE_POLICY, policy);
1484
} else {
1485
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
1486
if (ret < 0)
1487
goto out_destroy_policy;
1488
}
1489
1490
if (cpufreq_driver->get && has_target()) {
1491
policy->cur = cpufreq_driver->get(policy->cpu);
1492
if (!policy->cur) {
1493
ret = -EIO;
1494
pr_err("%s: ->get() failed\n", __func__);
1495
goto out_destroy_policy;
1496
}
1497
}
1498
1499
/*
1500
* Sometimes boot loaders set CPU frequency to a value outside of
1501
* frequency table present with cpufreq core. In such cases CPU might be
1502
* unstable if it has to run on that frequency for long duration of time
1503
* and so its better to set it to a frequency which is specified in
1504
* freq-table. This also makes cpufreq stats inconsistent as
1505
* cpufreq-stats would fail to register because current frequency of CPU
1506
* isn't found in freq-table.
1507
*
1508
* Because we don't want this change to effect boot process badly, we go
1509
* for the next freq which is >= policy->cur ('cur' must be set by now,
1510
* otherwise we will end up setting freq to lowest of the table as 'cur'
1511
* is initialized to zero).
1512
*
1513
* We are passing target-freq as "policy->cur - 1" otherwise
1514
* __cpufreq_driver_target() would simply fail, as policy->cur will be
1515
* equal to target-freq.
1516
*/
1517
if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1518
&& has_target()) {
1519
unsigned int old_freq = policy->cur;
1520
1521
/* Are we running at unknown frequency ? */
1522
ret = cpufreq_frequency_table_get_index(policy, old_freq);
1523
if (ret == -EINVAL) {
1524
ret = __cpufreq_driver_target(policy, old_freq - 1,
1525
CPUFREQ_RELATION_L);
1526
1527
/*
1528
* Reaching here after boot in a few seconds may not
1529
* mean that system will remain stable at "unknown"
1530
* frequency for longer duration. Hence, a BUG_ON().
1531
*/
1532
BUG_ON(ret);
1533
pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
1534
__func__, policy->cpu, old_freq, policy->cur);
1535
}
1536
}
1537
1538
if (new_policy) {
1539
ret = cpufreq_add_dev_interface(policy);
1540
if (ret)
1541
goto out_destroy_policy;
1542
1543
cpufreq_stats_create_table(policy);
1544
1545
write_lock_irqsave(&cpufreq_driver_lock, flags);
1546
list_add(&policy->policy_list, &cpufreq_policy_list);
1547
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1548
1549
/*
1550
* Register with the energy model before
1551
* em_rebuild_sched_domains() is called, which will result
1552
* in rebuilding of the sched domains, which should only be done
1553
* once the energy model is properly initialized for the policy
1554
* first.
1555
*
1556
* Also, this should be called before the policy is registered
1557
* with cooling framework.
1558
*/
1559
if (cpufreq_driver->register_em)
1560
cpufreq_driver->register_em(policy);
1561
}
1562
1563
ret = cpufreq_init_policy(policy);
1564
if (ret) {
1565
pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1566
__func__, cpu, ret);
1567
goto out_destroy_policy;
1568
}
1569
1570
return 0;
1571
1572
out_destroy_policy:
1573
for_each_cpu(j, policy->real_cpus)
1574
remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
1575
1576
out_offline_policy:
1577
if (cpufreq_driver->offline)
1578
cpufreq_driver->offline(policy);
1579
1580
out_exit_policy:
1581
if (cpufreq_driver->exit)
1582
cpufreq_driver->exit(policy);
1583
1584
out_clear_policy:
1585
cpumask_clear(policy->cpus);
1586
1587
return ret;
1588
}
1589
1590
static int cpufreq_online(unsigned int cpu)
1591
{
1592
struct cpufreq_policy *policy;
1593
bool new_policy;
1594
int ret;
1595
1596
pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1597
1598
/* Check if this CPU already has a policy to manage it */
1599
policy = per_cpu(cpufreq_cpu_data, cpu);
1600
if (policy) {
1601
WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1602
if (!policy_is_inactive(policy))
1603
return cpufreq_add_policy_cpu(policy, cpu);
1604
1605
/* This is the only online CPU for the policy. Start over. */
1606
new_policy = false;
1607
} else {
1608
new_policy = true;
1609
policy = cpufreq_policy_alloc(cpu);
1610
if (!policy)
1611
return -ENOMEM;
1612
}
1613
1614
ret = cpufreq_policy_online(policy, cpu, new_policy);
1615
if (ret) {
1616
cpufreq_policy_free(policy);
1617
return ret;
1618
}
1619
1620
kobject_uevent(&policy->kobj, KOBJ_ADD);
1621
1622
/* Callback for handling stuff after policy is ready */
1623
if (cpufreq_driver->ready)
1624
cpufreq_driver->ready(policy);
1625
1626
/* Register cpufreq cooling only for a new policy */
1627
if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
1628
policy->cdev = of_cpufreq_cooling_register(policy);
1629
1630
/*
1631
* Let the per-policy boost flag mirror the cpufreq_driver boost during
1632
* initialization for a new policy. For an existing policy, maintain the
1633
* previous boost value unless global boost is disabled.
1634
*/
1635
if (cpufreq_driver->set_boost && policy->boost_supported &&
1636
(new_policy || !cpufreq_boost_enabled())) {
1637
ret = policy_set_boost(policy, cpufreq_boost_enabled());
1638
if (ret) {
1639
/* If the set_boost fails, the online operation is not affected */
1640
pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
1641
str_enable_disable(cpufreq_boost_enabled()));
1642
}
1643
}
1644
1645
pr_debug("initialization complete\n");
1646
1647
return 0;
1648
}
1649
1650
/**
1651
* cpufreq_add_dev - the cpufreq interface for a CPU device.
1652
* @dev: CPU device.
1653
* @sif: Subsystem interface structure pointer (not used)
1654
*/
1655
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1656
{
1657
struct cpufreq_policy *policy;
1658
unsigned cpu = dev->id;
1659
int ret;
1660
1661
dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1662
1663
if (cpu_online(cpu)) {
1664
ret = cpufreq_online(cpu);
1665
if (ret)
1666
return ret;
1667
}
1668
1669
/* Create sysfs link on CPU registration */
1670
policy = per_cpu(cpufreq_cpu_data, cpu);
1671
if (policy)
1672
add_cpu_dev_symlink(policy, cpu, dev);
1673
1674
return 0;
1675
}
1676
1677
static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
1678
{
1679
int ret;
1680
1681
if (has_target())
1682
cpufreq_stop_governor(policy);
1683
1684
cpumask_clear_cpu(cpu, policy->cpus);
1685
1686
if (!policy_is_inactive(policy)) {
1687
/* Nominate a new CPU if necessary. */
1688
if (cpu == policy->cpu)
1689
policy->cpu = cpumask_any(policy->cpus);
1690
1691
/* Start the governor again for the active policy. */
1692
if (has_target()) {
1693
ret = cpufreq_start_governor(policy);
1694
if (ret)
1695
pr_err("%s: Failed to start governor\n", __func__);
1696
}
1697
1698
return;
1699
}
1700
1701
if (has_target()) {
1702
strscpy(policy->last_governor, policy->governor->name,
1703
CPUFREQ_NAME_LEN);
1704
cpufreq_exit_governor(policy);
1705
} else {
1706
policy->last_policy = policy->policy;
1707
}
1708
1709
/*
1710
* Perform the ->offline() during light-weight tear-down, as
1711
* that allows fast recovery when the CPU comes back.
1712
*/
1713
if (cpufreq_driver->offline) {
1714
cpufreq_driver->offline(policy);
1715
return;
1716
}
1717
1718
if (cpufreq_driver->exit)
1719
cpufreq_driver->exit(policy);
1720
1721
policy->freq_table = NULL;
1722
}
1723
1724
static int cpufreq_offline(unsigned int cpu)
1725
{
1726
struct cpufreq_policy *policy;
1727
1728
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1729
1730
policy = cpufreq_cpu_get_raw(cpu);
1731
if (!policy) {
1732
pr_debug("%s: No cpu_data found\n", __func__);
1733
return 0;
1734
}
1735
1736
guard(cpufreq_policy_write)(policy);
1737
1738
__cpufreq_offline(cpu, policy);
1739
1740
return 0;
1741
}
1742
1743
/*
1744
* cpufreq_remove_dev - remove a CPU device
1745
*
1746
* Removes the cpufreq interface for a CPU device.
1747
*/
1748
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1749
{
1750
unsigned int cpu = dev->id;
1751
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1752
1753
if (!policy)
1754
return;
1755
1756
scoped_guard(cpufreq_policy_write, policy) {
1757
if (cpu_online(cpu))
1758
__cpufreq_offline(cpu, policy);
1759
1760
remove_cpu_dev_symlink(policy, cpu, dev);
1761
1762
if (!cpumask_empty(policy->real_cpus))
1763
return;
1764
1765
/*
1766
* Unregister cpufreq cooling once all the CPUs of the policy
1767
* are removed.
1768
*/
1769
if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
1770
cpufreq_cooling_unregister(policy->cdev);
1771
policy->cdev = NULL;
1772
}
1773
1774
/* We did light-weight exit earlier, do full tear down now */
1775
if (cpufreq_driver->offline && cpufreq_driver->exit)
1776
cpufreq_driver->exit(policy);
1777
}
1778
1779
cpufreq_policy_free(policy);
1780
}
1781
1782
/**
1783
* cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
1784
* @policy: Policy managing CPUs.
1785
* @new_freq: New CPU frequency.
1786
*
1787
* Adjust to the current frequency first and clean up later by either calling
1788
* cpufreq_update_policy(), or scheduling handle_update().
1789
*/
1790
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1791
unsigned int new_freq)
1792
{
1793
struct cpufreq_freqs freqs;
1794
1795
pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1796
policy->cur, new_freq);
1797
1798
freqs.old = policy->cur;
1799
freqs.new = new_freq;
1800
1801
cpufreq_freq_transition_begin(policy, &freqs);
1802
cpufreq_freq_transition_end(policy, &freqs, 0);
1803
}
1804
1805
static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
1806
{
1807
unsigned int new_freq;
1808
1809
if (!cpufreq_driver->get)
1810
return 0;
1811
1812
new_freq = cpufreq_driver->get(policy->cpu);
1813
if (!new_freq)
1814
return 0;
1815
1816
/*
1817
* If fast frequency switching is used with the given policy, the check
1818
* against policy->cur is pointless, so skip it in that case.
1819
*/
1820
if (policy->fast_switch_enabled || !has_target())
1821
return new_freq;
1822
1823
if (policy->cur != new_freq) {
1824
/*
1825
* For some platforms, the frequency returned by hardware may be
1826
* slightly different from what is provided in the frequency
1827
* table, for example hardware may return 499 MHz instead of 500
1828
* MHz. In such cases it is better to avoid getting into
1829
* unnecessary frequency updates.
1830
*/
1831
if (abs(policy->cur - new_freq) < KHZ_PER_MHZ)
1832
return policy->cur;
1833
1834
cpufreq_out_of_sync(policy, new_freq);
1835
if (update)
1836
schedule_work(&policy->update);
1837
}
1838
1839
return new_freq;
1840
}
1841
1842
/**
1843
* cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1844
* @cpu: CPU number
1845
*
1846
* This is the last known freq, without actually getting it from the driver.
1847
* Return value will be same as what is shown in scaling_cur_freq in sysfs.
1848
*/
1849
unsigned int cpufreq_quick_get(unsigned int cpu)
1850
{
1851
unsigned long flags;
1852
1853
read_lock_irqsave(&cpufreq_driver_lock, flags);
1854
1855
if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
1856
unsigned int ret_freq = cpufreq_driver->get(cpu);
1857
1858
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1859
1860
return ret_freq;
1861
}
1862
1863
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1864
1865
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
1866
if (policy)
1867
return policy->cur;
1868
1869
return 0;
1870
}
1871
EXPORT_SYMBOL(cpufreq_quick_get);
1872
1873
/**
1874
* cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1875
* @cpu: CPU number
1876
*
1877
* Just return the max possible frequency for a given CPU.
1878
*/
1879
unsigned int cpufreq_quick_get_max(unsigned int cpu)
1880
{
1881
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
1882
if (policy)
1883
return policy->max;
1884
1885
return 0;
1886
}
1887
EXPORT_SYMBOL(cpufreq_quick_get_max);
1888
1889
/**
1890
* cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
1891
* @cpu: CPU number
1892
*
1893
* The default return value is the max_freq field of cpuinfo.
1894
*/
1895
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
1896
{
1897
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
1898
if (policy)
1899
return policy->cpuinfo.max_freq;
1900
1901
return 0;
1902
}
1903
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
1904
1905
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1906
{
1907
if (unlikely(policy_is_inactive(policy)))
1908
return 0;
1909
1910
return cpufreq_verify_current_freq(policy, true);
1911
}
1912
1913
/**
1914
* cpufreq_get - get the current CPU frequency (in kHz)
1915
* @cpu: CPU number
1916
*
1917
* Get the CPU current (static) CPU frequency
1918
*/
1919
unsigned int cpufreq_get(unsigned int cpu)
1920
{
1921
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
1922
if (!policy)
1923
return 0;
1924
1925
guard(cpufreq_policy_read)(policy);
1926
1927
return __cpufreq_get(policy);
1928
}
1929
EXPORT_SYMBOL(cpufreq_get);
1930
1931
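/*
 * Illustrative sketch (not part of this file): how a consumer such as a
 * thermal driver might read a CPU's frequency. cpufreq_quick_get() normally
 * just reports the cached policy->cur value (it only calls into the driver
 * for ->setpolicy drivers that provide ->get), whereas cpufreq_get() takes
 * the policy read lock and may query the driver via
 * cpufreq_verify_current_freq(). The function name is a placeholder.
 */
static unsigned int example_read_cpu_khz(unsigned int cpu, bool precise)
{
	/* Both helpers return 0 when no policy covers @cpu. */
	return precise ? cpufreq_get(cpu) : cpufreq_quick_get(cpu);
}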
static struct subsys_interface cpufreq_interface = {
1932
.name = "cpufreq",
1933
.subsys = &cpu_subsys,
1934
.add_dev = cpufreq_add_dev,
1935
.remove_dev = cpufreq_remove_dev,
1936
};
1937
1938
/*
1939
* In case the platform wants a specific frequency to be configured
1940
* during suspend.
1941
*/
1942
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1943
{
1944
int ret;
1945
1946
if (!policy->suspend_freq) {
1947
pr_debug("%s: suspend_freq not defined\n", __func__);
1948
return 0;
1949
}
1950
1951
pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1952
policy->suspend_freq);
1953
1954
ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1955
CPUFREQ_RELATION_H);
1956
if (ret)
1957
pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1958
__func__, policy->suspend_freq, ret);
1959
1960
return ret;
1961
}
1962
EXPORT_SYMBOL(cpufreq_generic_suspend);
1963
1964
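/*
 * Illustrative sketch (not part of this file): a platform driver that wants
 * one fixed frequency across suspend can set policy->suspend_freq from its
 * ->init() callback and point .suspend at cpufreq_generic_suspend(). All
 * "example_" names are placeholders.
 */
static int example_cpu_init(struct cpufreq_policy *policy)
{
	/* ... regular policy setup (freq_table, transition latency, ...) ... */
	policy->suspend_freq = policy->cpuinfo.max_freq;
	return 0;
}

static struct cpufreq_driver example_suspend_aware_driver = {
	/* ... other mandatory callbacks ... */
	.init		= example_cpu_init,
	.suspend	= cpufreq_generic_suspend,
};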
/**
1965
* cpufreq_suspend() - Suspend CPUFreq governors.
1966
*
1967
* Called during system wide Suspend/Hibernate cycles for suspending governors
1968
* as some platforms can't change frequency after this point in the suspend cycle,
1969
* because some of the devices (e.g. i2c, regulators) used for changing the
1970
* frequency are suspended quickly after this point.
1971
*/
1972
void cpufreq_suspend(void)
1973
{
1974
struct cpufreq_policy *policy;
1975
1976
if (!cpufreq_driver)
1977
return;
1978
1979
if (!has_target() && !cpufreq_driver->suspend)
1980
goto suspend;
1981
1982
pr_debug("%s: Suspending Governors\n", __func__);
1983
1984
for_each_active_policy(policy) {
1985
if (has_target()) {
1986
scoped_guard(cpufreq_policy_write, policy) {
1987
cpufreq_stop_governor(policy);
1988
}
1989
}
1990
1991
if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1992
pr_err("%s: Failed to suspend driver: %s\n", __func__,
1993
cpufreq_driver->name);
1994
}
1995
1996
suspend:
1997
cpufreq_suspended = true;
1998
}
1999
2000
/**
2001
* cpufreq_resume() - Resume CPUFreq governors.
2002
*
2003
* Called during system wide Suspend/Hibernate cycle for resuming governors that
2004
* are suspended with cpufreq_suspend().
2005
*/
2006
void cpufreq_resume(void)
2007
{
2008
struct cpufreq_policy *policy;
2009
int ret;
2010
2011
if (!cpufreq_driver)
2012
return;
2013
2014
if (unlikely(!cpufreq_suspended))
2015
return;
2016
2017
cpufreq_suspended = false;
2018
2019
if (!has_target() && !cpufreq_driver->resume)
2020
return;
2021
2022
pr_debug("%s: Resuming Governors\n", __func__);
2023
2024
for_each_active_policy(policy) {
2025
if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
2026
pr_err("%s: Failed to resume driver: %s\n", __func__,
2027
cpufreq_driver->name);
2028
} else if (has_target()) {
2029
scoped_guard(cpufreq_policy_write, policy) {
2030
ret = cpufreq_start_governor(policy);
2031
}
2032
2033
if (ret)
2034
pr_err("%s: Failed to start governor for CPU%u's policy\n",
2035
__func__, policy->cpu);
2036
}
2037
}
2038
}
2039
2040
/**
2041
* cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
2042
* @flags: Flags to test against the current cpufreq driver's flags.
2043
*
2044
* Assumes that the driver is there, so callers must ensure that this is the
2045
* case.
2046
*/
2047
bool cpufreq_driver_test_flags(u16 flags)
2048
{
2049
return !!(cpufreq_driver->flags & flags);
2050
}
2051
2052
/**
2053
* cpufreq_get_current_driver - Return the current driver's name.
2054
*
2055
* Return the name string of the currently registered cpufreq driver or NULL if
2056
* none.
2057
*/
2058
const char *cpufreq_get_current_driver(void)
2059
{
2060
if (cpufreq_driver)
2061
return cpufreq_driver->name;
2062
2063
return NULL;
2064
}
2065
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
2066
2067
/**
2068
* cpufreq_get_driver_data - Return current driver data.
2069
*
2070
* Return the private data of the currently registered cpufreq driver, or NULL
2071
* if no cpufreq driver has been registered.
2072
*/
2073
void *cpufreq_get_driver_data(void)
2074
{
2075
if (cpufreq_driver)
2076
return cpufreq_driver->driver_data;
2077
2078
return NULL;
2079
}
2080
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
2081
2082
/*********************************************************************
2083
* NOTIFIER LISTS INTERFACE *
2084
*********************************************************************/
2085
2086
/**
2087
* cpufreq_register_notifier - Register a notifier with cpufreq.
2088
* @nb: notifier function to register.
2089
* @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2090
*
2091
* Add a notifier to one of two lists: either a list of notifiers that run on
2092
* clock rate changes (once before and once after every transition), or a list
2093
* of notifiers that run on cpufreq policy changes.
2094
*
2095
* This function may sleep and it has the same return values as
2096
* blocking_notifier_chain_register().
2097
*/
2098
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
2099
{
2100
int ret;
2101
2102
if (cpufreq_disabled())
2103
return -EINVAL;
2104
2105
switch (list) {
2106
case CPUFREQ_TRANSITION_NOTIFIER:
2107
mutex_lock(&cpufreq_fast_switch_lock);
2108
2109
if (cpufreq_fast_switch_count > 0) {
2110
mutex_unlock(&cpufreq_fast_switch_lock);
2111
return -EBUSY;
2112
}
2113
ret = srcu_notifier_chain_register(
2114
&cpufreq_transition_notifier_list, nb);
2115
if (!ret)
2116
cpufreq_fast_switch_count--;
2117
2118
mutex_unlock(&cpufreq_fast_switch_lock);
2119
break;
2120
case CPUFREQ_POLICY_NOTIFIER:
2121
ret = blocking_notifier_chain_register(
2122
&cpufreq_policy_notifier_list, nb);
2123
break;
2124
default:
2125
ret = -EINVAL;
2126
}
2127
2128
return ret;
2129
}
2130
EXPORT_SYMBOL(cpufreq_register_notifier);
2131
2132
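/*
 * Illustrative sketch (not part of this file): registering a transition
 * notifier. The callback runs once with CPUFREQ_PRECHANGE and once with
 * CPUFREQ_POSTCHANGE around every frequency change, and @data points to the
 * struct cpufreq_freqs describing the transition. As the code above shows,
 * registration fails with -EBUSY while fast switching is enabled. All
 * "example_" names are placeholders.
 */
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (action == CPUFREQ_POSTCHANGE)
		pr_debug("CPU%u: %u -> %u kHz\n", freqs->policy->cpu,
			 freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
	.notifier_call = example_transition_cb,
};

/*
 * Typically paired in module init/exit:
 *   cpufreq_register_notifier(&example_transition_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *   cpufreq_unregister_notifier(&example_transition_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */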
/**
2133
* cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
2134
* @nb: notifier block to be unregistered.
2135
* @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2136
*
2137
* Remove a notifier from one of the cpufreq notifier lists.
2138
*
2139
* This function may sleep and it has the same return values as
2140
* blocking_notifier_chain_unregister().
2141
*/
2142
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
2143
{
2144
int ret;
2145
2146
if (cpufreq_disabled())
2147
return -EINVAL;
2148
2149
switch (list) {
2150
case CPUFREQ_TRANSITION_NOTIFIER:
2151
mutex_lock(&cpufreq_fast_switch_lock);
2152
2153
ret = srcu_notifier_chain_unregister(
2154
&cpufreq_transition_notifier_list, nb);
2155
if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
2156
cpufreq_fast_switch_count++;
2157
2158
mutex_unlock(&cpufreq_fast_switch_lock);
2159
break;
2160
case CPUFREQ_POLICY_NOTIFIER:
2161
ret = blocking_notifier_chain_unregister(
2162
&cpufreq_policy_notifier_list, nb);
2163
break;
2164
default:
2165
ret = -EINVAL;
2166
}
2167
2168
return ret;
2169
}
2170
EXPORT_SYMBOL(cpufreq_unregister_notifier);
2171
2172
2173
/*********************************************************************
2174
* GOVERNORS *
2175
*********************************************************************/
2176
2177
/**
2178
* cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
2179
* @policy: cpufreq policy to switch the frequency for.
2180
* @target_freq: New frequency to set (may be approximate).
2181
*
2182
* Carry out a fast frequency switch without sleeping.
2183
*
2184
* The driver's ->fast_switch() callback invoked by this function must be
2185
* suitable for being called from within RCU-sched read-side critical sections
2186
* and it is expected to select the minimum available frequency greater than or
2187
* equal to @target_freq (CPUFREQ_RELATION_L).
2188
*
2189
* This function must not be called if policy->fast_switch_enabled is unset.
2190
*
2191
* Governors calling this function must guarantee that it will never be invoked
2192
* twice in parallel for the same policy and that it will never be called in
2193
* parallel with either ->target() or ->target_index() for the same policy.
2194
*
2195
* Returns the actual frequency set for the CPU.
2196
*
2197
* If 0 is returned by the driver's ->fast_switch() callback to indicate an
2198
* error condition, the hardware configuration must be preserved.
2199
*/
2200
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
2201
unsigned int target_freq)
2202
{
2203
unsigned int freq;
2204
int cpu;
2205
2206
target_freq = clamp_val(target_freq, policy->min, policy->max);
2207
freq = cpufreq_driver->fast_switch(policy, target_freq);
2208
2209
if (!freq)
2210
return 0;
2211
2212
policy->cur = freq;
2213
arch_set_freq_scale(policy->related_cpus, freq,
2214
arch_scale_freq_ref(policy->cpu));
2215
cpufreq_stats_record_transition(policy, freq);
2216
2217
if (trace_cpu_frequency_enabled()) {
2218
for_each_cpu(cpu, policy->cpus)
2219
trace_cpu_frequency(freq, cpu);
2220
}
2221
2222
return freq;
2223
}
2224
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
2225
2226
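/*
 * Illustrative sketch (not part of this file): the fast-switch path as a
 * governor like schedutil might use it from its frequency-update hook. This
 * runs in scheduler context, so it must not sleep, and the governor is
 * responsible for serializing calls per policy as documented above. The
 * function name is a placeholder.
 */
static void example_gov_fast_update(struct cpufreq_policy *policy,
				    unsigned int next_freq)
{
	unsigned int freq;

	if (!policy->fast_switch_enabled)
		return;	/* must use the slow ->target*() path instead */

	freq = cpufreq_driver_fast_switch(policy, next_freq);
	if (!freq)
		pr_debug("fast switch to %u kHz failed\n", next_freq);
}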
/**
2227
* cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
2228
* @cpu: Target CPU.
2229
* @min_perf: Minimum (required) performance level (units of @capacity).
2230
* @target_perf: Target (desired) performance level (units of @capacity).
2231
* @capacity: Capacity of the target CPU.
2232
*
2233
* Carry out a fast performance level switch of @cpu without sleeping.
2234
*
2235
* The driver's ->adjust_perf() callback invoked by this function must be
2236
* suitable for being called from within RCU-sched read-side critical sections
2237
* and it is expected to select a suitable performance level equal to or above
2238
* @min_perf and preferably equal to or below @target_perf.
2239
*
2240
* This function must not be called if policy->fast_switch_enabled is unset.
2241
*
2242
* Governors calling this function must guarantee that it will never be invoked
2243
* twice in parallel for the same CPU and that it will never be called in
2244
* parallel with either ->target() or ->target_index() or ->fast_switch() for
2245
* the same CPU.
2246
*/
2247
void cpufreq_driver_adjust_perf(unsigned int cpu,
2248
unsigned long min_perf,
2249
unsigned long target_perf,
2250
unsigned long capacity)
2251
{
2252
cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
2253
}
2254
2255
/**
2256
* cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
2257
*
2258
* Return 'true' if the ->adjust_perf callback is present for the
2259
* current driver or 'false' otherwise.
2260
*/
2261
bool cpufreq_driver_has_adjust_perf(void)
2262
{
2263
return !!cpufreq_driver->adjust_perf;
2264
}
2265
2266
/* Must set freqs->new to intermediate frequency */
2267
static int __target_intermediate(struct cpufreq_policy *policy,
2268
struct cpufreq_freqs *freqs, int index)
2269
{
2270
int ret;
2271
2272
freqs->new = cpufreq_driver->get_intermediate(policy, index);
2273
2274
/* We don't need to switch to intermediate freq */
2275
if (!freqs->new)
2276
return 0;
2277
2278
pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
2279
__func__, policy->cpu, freqs->old, freqs->new);
2280
2281
cpufreq_freq_transition_begin(policy, freqs);
2282
ret = cpufreq_driver->target_intermediate(policy, index);
2283
cpufreq_freq_transition_end(policy, freqs, ret);
2284
2285
if (ret)
2286
pr_err("%s: Failed to change to intermediate frequency: %d\n",
2287
__func__, ret);
2288
2289
return ret;
2290
}
2291
2292
static int __target_index(struct cpufreq_policy *policy, int index)
2293
{
2294
struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
2295
unsigned int restore_freq, intermediate_freq = 0;
2296
unsigned int newfreq = policy->freq_table[index].frequency;
2297
int retval = -EINVAL;
2298
bool notify;
2299
2300
if (newfreq == policy->cur)
2301
return 0;
2302
2303
/* Save last value to restore later on errors */
2304
restore_freq = policy->cur;
2305
2306
notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
2307
if (notify) {
2308
/* Handle switching to intermediate frequency */
2309
if (cpufreq_driver->get_intermediate) {
2310
retval = __target_intermediate(policy, &freqs, index);
2311
if (retval)
2312
return retval;
2313
2314
intermediate_freq = freqs.new;
2315
/* Set old freq to intermediate */
2316
if (intermediate_freq)
2317
freqs.old = freqs.new;
2318
}
2319
2320
freqs.new = newfreq;
2321
pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
2322
__func__, policy->cpu, freqs.old, freqs.new);
2323
2324
cpufreq_freq_transition_begin(policy, &freqs);
2325
}
2326
2327
retval = cpufreq_driver->target_index(policy, index);
2328
if (retval)
2329
pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
2330
retval);
2331
2332
if (notify) {
2333
cpufreq_freq_transition_end(policy, &freqs, retval);
2334
2335
/*
2336
* Failed after setting to intermediate freq? Driver should have
2337
* reverted back to initial frequency and so should we. Check
2338
* here for intermediate_freq instead of get_intermediate, in
2339
* case we haven't switched to intermediate freq at all.
2340
*/
2341
if (unlikely(retval && intermediate_freq)) {
2342
freqs.old = intermediate_freq;
2343
freqs.new = restore_freq;
2344
cpufreq_freq_transition_begin(policy, &freqs);
2345
cpufreq_freq_transition_end(policy, &freqs, 0);
2346
}
2347
}
2348
2349
return retval;
2350
}
2351
2352
int __cpufreq_driver_target(struct cpufreq_policy *policy,
2353
unsigned int target_freq,
2354
unsigned int relation)
2355
{
2356
unsigned int old_target_freq = target_freq;
2357
2358
if (cpufreq_disabled())
2359
return -ENODEV;
2360
2361
target_freq = __resolve_freq(policy, target_freq, policy->min,
2362
policy->max, relation);
2363
2364
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
2365
policy->cpu, target_freq, relation, old_target_freq);
2366
2367
/*
2368
* This might look like a redundant call as we are checking it again
2369
* after finding the index, but it is left intentionally for cases where
2370
* exactly the same frequency is requested again, so we can save a few function
2371
* calls.
2372
*/
2373
if (target_freq == policy->cur &&
2374
!(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
2375
return 0;
2376
2377
if (cpufreq_driver->target) {
2378
/*
2379
* If the driver hasn't set up a single inefficient frequency,
2380
* it's unlikely it knows how to decode CPUFREQ_RELATION_E.
2381
*/
2382
if (!policy->efficiencies_available)
2383
relation &= ~CPUFREQ_RELATION_E;
2384
2385
return cpufreq_driver->target(policy, target_freq, relation);
2386
}
2387
2388
if (!cpufreq_driver->target_index)
2389
return -EINVAL;
2390
2391
return __target_index(policy, policy->cached_resolved_idx);
2392
}
2393
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2394
2395
int cpufreq_driver_target(struct cpufreq_policy *policy,
2396
unsigned int target_freq,
2397
unsigned int relation)
2398
{
2399
guard(cpufreq_policy_write)(policy);
2400
2401
return __cpufreq_driver_target(policy, target_freq, relation);
2402
}
2403
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
2404
2405
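/*
 * Illustrative sketch (not part of this file): a governor worker requesting a
 * new frequency. cpufreq_driver_target() takes the policy write lock itself,
 * so code that already holds it (for instance governor ->start() or ->limits()
 * callbacks) must use __cpufreq_driver_target() instead. The function name is
 * a placeholder.
 */
static void example_gov_set_freq(struct cpufreq_policy *policy,
				 unsigned int target_khz)
{
	/* Pick the lowest suitable frequency at or above target_khz. */
	cpufreq_driver_target(policy, target_khz, CPUFREQ_RELATION_L);
}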
__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
2406
{
2407
return NULL;
2408
}
2409
2410
static int cpufreq_init_governor(struct cpufreq_policy *policy)
2411
{
2412
int ret;
2413
2414
/* Don't start any governor operations if we are entering suspend */
2415
if (cpufreq_suspended)
2416
return 0;
2417
/*
2418
* The governor might not be initialized here if an ACPI _PPC change
2419
* notification happened, so check it.
2420
*/
2421
if (!policy->governor)
2422
return -EINVAL;
2423
2424
/* Platform doesn't want dynamic frequency switching? */
2425
if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
2426
cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
2427
struct cpufreq_governor *gov = cpufreq_fallback_governor();
2428
2429
if (gov) {
2430
pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
2431
policy->governor->name, gov->name);
2432
policy->governor = gov;
2433
} else {
2434
return -EINVAL;
2435
}
2436
}
2437
2438
if (!try_module_get(policy->governor->owner))
2439
return -EINVAL;
2440
2441
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2442
2443
if (policy->governor->init) {
2444
ret = policy->governor->init(policy);
2445
if (ret) {
2446
module_put(policy->governor->owner);
2447
return ret;
2448
}
2449
}
2450
2451
policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
2452
2453
return 0;
2454
}
2455
2456
static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2457
{
2458
if (cpufreq_suspended || !policy->governor)
2459
return;
2460
2461
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2462
2463
if (policy->governor->exit)
2464
policy->governor->exit(policy);
2465
2466
module_put(policy->governor->owner);
2467
}
2468
2469
int cpufreq_start_governor(struct cpufreq_policy *policy)
2470
{
2471
int ret;
2472
2473
if (cpufreq_suspended)
2474
return 0;
2475
2476
if (!policy->governor)
2477
return -EINVAL;
2478
2479
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2480
2481
cpufreq_verify_current_freq(policy, false);
2482
2483
if (policy->governor->start) {
2484
ret = policy->governor->start(policy);
2485
if (ret)
2486
return ret;
2487
}
2488
2489
if (policy->governor->limits)
2490
policy->governor->limits(policy);
2491
2492
return 0;
2493
}
2494
2495
void cpufreq_stop_governor(struct cpufreq_policy *policy)
2496
{
2497
if (cpufreq_suspended || !policy->governor)
2498
return;
2499
2500
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2501
2502
if (policy->governor->stop)
2503
policy->governor->stop(policy);
2504
}
2505
2506
static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2507
{
2508
if (cpufreq_suspended || !policy->governor)
2509
return;
2510
2511
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2512
2513
if (policy->governor->limits)
2514
policy->governor->limits(policy);
2515
}
2516
2517
int cpufreq_register_governor(struct cpufreq_governor *governor)
2518
{
2519
int err;
2520
2521
if (!governor)
2522
return -EINVAL;
2523
2524
if (cpufreq_disabled())
2525
return -ENODEV;
2526
2527
mutex_lock(&cpufreq_governor_mutex);
2528
2529
err = -EBUSY;
2530
if (!find_governor(governor->name)) {
2531
err = 0;
2532
list_add(&governor->governor_list, &cpufreq_governor_list);
2533
}
2534
2535
mutex_unlock(&cpufreq_governor_mutex);
2536
return err;
2537
}
2538
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2539
2540
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2541
{
2542
struct cpufreq_policy *policy;
2543
unsigned long flags;
2544
2545
if (!governor)
2546
return;
2547
2548
if (cpufreq_disabled())
2549
return;
2550
2551
/* clear last_governor for all inactive policies */
2552
read_lock_irqsave(&cpufreq_driver_lock, flags);
2553
for_each_inactive_policy(policy) {
2554
if (!strcmp(policy->last_governor, governor->name)) {
2555
policy->governor = NULL;
2556
policy->last_governor[0] = '\0';
2557
}
2558
}
2559
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2560
2561
mutex_lock(&cpufreq_governor_mutex);
2562
list_del(&governor->governor_list);
2563
mutex_unlock(&cpufreq_governor_mutex);
2564
}
2565
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2566
2567
2568
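/*
 * Illustrative sketch (not part of this file): the skeleton of a loadable
 * governor. Only .name plus whichever callbacks the governor needs are
 * required; the core takes a reference on .owner while the governor is in
 * use. The ->limits() callback below mirrors the in-tree performance
 * governor and runs with the policy already locked, hence the lock-free
 * __cpufreq_driver_target(). All "example_" names are placeholders.
 */
static void example_gov_limits(struct cpufreq_policy *policy)
{
	__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}

static struct cpufreq_governor example_governor = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.limits	= example_gov_limits,
};

/*
 * module_init():  cpufreq_register_governor(&example_governor);
 * module_exit():  cpufreq_unregister_governor(&example_governor);
 */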
/*********************************************************************
2569
* POLICY INTERFACE *
2570
*********************************************************************/
2571
2572
DEFINE_PER_CPU(unsigned long, cpufreq_pressure);
2573
2574
/**
2575
* cpufreq_update_pressure() - Update cpufreq pressure for CPUs
2576
* @policy: cpufreq policy of the CPUs.
2577
*
2578
* Update the value of cpufreq pressure for all CPUs in the policy.
2579
*/
2580
static void cpufreq_update_pressure(struct cpufreq_policy *policy)
2581
{
2582
unsigned long max_capacity, capped_freq, pressure;
2583
u32 max_freq;
2584
int cpu;
2585
2586
cpu = cpumask_first(policy->related_cpus);
2587
max_freq = arch_scale_freq_ref(cpu);
2588
capped_freq = policy->max;
2589
2590
/*
2591
* Handle boost frequencies properly: they should simply clear
2592
* the cpufreq pressure value.
2593
*/
2594
if (max_freq <= capped_freq) {
2595
pressure = 0;
2596
} else {
2597
max_capacity = arch_scale_cpu_capacity(cpu);
2598
pressure = max_capacity -
2599
mult_frac(max_capacity, capped_freq, max_freq);
2600
}
2601
2602
for_each_cpu(cpu, policy->related_cpus)
2603
WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure);
2604
}
2605
2606
/**
2607
* cpufreq_set_policy - Modify cpufreq policy parameters.
2608
* @policy: Policy object to modify.
2609
* @new_gov: Policy governor pointer.
2610
* @new_pol: Policy value (for drivers with built-in governors).
2611
*
2612
* Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
2613
* limits to be set for the policy, update @policy with the verified limits
2614
* values and either invoke the driver's ->setpolicy() callback (if present) or
2615
* carry out a governor update for @policy. That is, run the current governor's
2616
* ->limits() callback (if @new_gov points to the same object as the one in
2617
* @policy) or replace the governor for @policy with @new_gov.
2618
*
2619
* The cpuinfo part of @policy is not updated by this function.
2620
*/
2621
static int cpufreq_set_policy(struct cpufreq_policy *policy,
2622
struct cpufreq_governor *new_gov,
2623
unsigned int new_pol)
2624
{
2625
struct cpufreq_policy_data new_data;
2626
struct cpufreq_governor *old_gov;
2627
int ret;
2628
2629
memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2630
new_data.freq_table = policy->freq_table;
2631
new_data.cpu = policy->cpu;
2632
/*
2633
* The PM QoS framework collects all the requests from users and provides us
2634
* the final aggregated value here.
2635
*/
2636
new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
2637
new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
2638
2639
pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2640
new_data.cpu, new_data.min, new_data.max);
2641
2642
/*
2643
* Verify that the CPU speed can be set within these limits and make sure
2644
* that min <= max.
2645
*/
2646
ret = cpufreq_driver->verify(&new_data);
2647
if (ret)
2648
return ret;
2649
2650
/*
2651
* Resolve policy min/max to available frequencies. This ensures that the
2652
* resolved frequencies neither overshoot the requested maximum
2653
* nor undershoot the requested minimum.
2654
*
2655
* Avoid storing intermediate values in policy->max or policy->min and
2656
* compiler optimizations around them because they may be accessed
2657
* concurrently by cpufreq_driver_resolve_freq() during the update.
2658
*/
2659
WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
2660
new_data.min, new_data.max,
2661
CPUFREQ_RELATION_H));
2662
new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
2663
new_data.max, CPUFREQ_RELATION_L);
2664
WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
2665
2666
trace_cpu_frequency_limits(policy);
2667
2668
cpufreq_update_pressure(policy);
2669
2670
policy->cached_target_freq = UINT_MAX;
2671
2672
pr_debug("new min and max freqs are %u - %u kHz\n",
2673
policy->min, policy->max);
2674
2675
if (cpufreq_driver->setpolicy) {
2676
policy->policy = new_pol;
2677
pr_debug("setting range\n");
2678
return cpufreq_driver->setpolicy(policy);
2679
}
2680
2681
if (new_gov == policy->governor) {
2682
pr_debug("governor limits update\n");
2683
cpufreq_governor_limits(policy);
2684
return 0;
2685
}
2686
2687
pr_debug("governor switch\n");
2688
2689
/* save old, working values */
2690
old_gov = policy->governor;
2691
/* end old governor */
2692
if (old_gov) {
2693
cpufreq_stop_governor(policy);
2694
cpufreq_exit_governor(policy);
2695
}
2696
2697
/* start new governor */
2698
policy->governor = new_gov;
2699
ret = cpufreq_init_governor(policy);
2700
if (!ret) {
2701
ret = cpufreq_start_governor(policy);
2702
if (!ret) {
2703
pr_debug("governor change\n");
2704
return 0;
2705
}
2706
cpufreq_exit_governor(policy);
2707
}
2708
2709
/* new governor failed, so re-start old one */
2710
pr_debug("starting governor %s failed\n", policy->governor->name);
2711
if (old_gov) {
2712
policy->governor = old_gov;
2713
if (cpufreq_init_governor(policy)) {
2714
policy->governor = NULL;
2715
} else if (cpufreq_start_governor(policy)) {
2716
cpufreq_exit_governor(policy);
2717
policy->governor = NULL;
2718
}
2719
}
2720
2721
return ret;
2722
}
2723
2724
static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
2725
{
2726
guard(cpufreq_policy_write)(policy);
2727
2728
/*
2729
* BIOS might change freq behind our back
2730
* -> ask driver for current freq and notify governors about a change
2731
*/
2732
if (cpufreq_driver->get && has_target() &&
2733
(cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
2734
return;
2735
2736
refresh_frequency_limits(policy);
2737
}
2738
2739
/**
2740
* cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2741
* @cpu: CPU to re-evaluate the policy for.
2742
*
2743
* Update the current frequency for the cpufreq policy of @cpu and use
2744
* cpufreq_set_policy() to re-apply the min and max limits, which triggers the
2745
* evaluation of policy notifiers and the cpufreq driver's ->verify() callback
2746
* for the policy in question, among other things.
2747
*/
2748
void cpufreq_update_policy(unsigned int cpu)
2749
{
2750
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
2751
if (!policy)
2752
return;
2753
2754
cpufreq_policy_refresh(policy);
2755
}
2756
EXPORT_SYMBOL(cpufreq_update_policy);
2757
2758
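/*
 * Illustrative sketch (not part of this file): platform code (for example a
 * handler for a firmware notification that changed the allowed frequency
 * range) asking the core to re-evaluate the policy of each affected CPU.
 * The function name is a placeholder.
 */
static void example_limits_changed(const struct cpumask *cpus)
{
	unsigned int cpu;

	for_each_cpu(cpu, cpus)
		cpufreq_update_policy(cpu);
}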
/**
2759
* cpufreq_update_limits - Update policy limits for a given CPU.
2760
* @cpu: CPU to update the policy limits for.
2761
*
2762
* Invoke the driver's ->update_limits callback if present or call
2763
* cpufreq_policy_refresh() for @cpu.
2764
*/
2765
void cpufreq_update_limits(unsigned int cpu)
2766
{
2767
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
2768
if (!policy)
2769
return;
2770
2771
if (cpufreq_driver->update_limits)
2772
cpufreq_driver->update_limits(policy);
2773
else
2774
cpufreq_policy_refresh(policy);
2775
}
2776
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
2777
2778
/*********************************************************************
2779
* BOOST *
2780
*********************************************************************/
2781
int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
2782
{
2783
int ret;
2784
2785
if (!policy->freq_table)
2786
return -ENXIO;
2787
2788
ret = cpufreq_frequency_table_cpuinfo(policy);
2789
if (ret) {
2790
pr_err("%s: Policy frequency update failed\n", __func__);
2791
return ret;
2792
}
2793
2794
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
2795
if (ret < 0)
2796
return ret;
2797
2798
return 0;
2799
}
2800
EXPORT_SYMBOL_GPL(cpufreq_boost_set_sw);
2801
2802
static int cpufreq_boost_trigger_state(int state)
2803
{
2804
struct cpufreq_policy *policy;
2805
unsigned long flags;
2806
int ret = 0;
2807
2808
/*
2809
* Don't compare 'cpufreq_driver->boost_enabled' with 'state' here to
2810
* make sure all policies are in sync with global boost flag.
2811
*/
2812
2813
write_lock_irqsave(&cpufreq_driver_lock, flags);
2814
cpufreq_driver->boost_enabled = state;
2815
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2816
2817
cpus_read_lock();
2818
for_each_active_policy(policy) {
2819
if (!policy->boost_supported)
2820
continue;
2821
2822
ret = policy_set_boost(policy, state);
2823
if (ret)
2824
goto err_reset_state;
2825
}
2826
cpus_read_unlock();
2827
2828
return 0;
2829
2830
err_reset_state:
2831
cpus_read_unlock();
2832
2833
write_lock_irqsave(&cpufreq_driver_lock, flags);
2834
cpufreq_driver->boost_enabled = !state;
2835
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2836
2837
pr_err("%s: Cannot %s BOOST\n",
2838
__func__, str_enable_disable(state));
2839
2840
return ret;
2841
}
2842
2843
static bool cpufreq_boost_supported(void)
2844
{
2845
return cpufreq_driver->set_boost;
2846
}
2847
2848
static int create_boost_sysfs_file(void)
2849
{
2850
int ret;
2851
2852
ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2853
if (ret)
2854
pr_err("%s: cannot register global BOOST sysfs file\n",
2855
__func__);
2856
2857
return ret;
2858
}
2859
2860
static void remove_boost_sysfs_file(void)
2861
{
2862
if (cpufreq_boost_supported())
2863
sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2864
}
2865
2866
bool cpufreq_boost_enabled(void)
2867
{
2868
return cpufreq_driver->boost_enabled;
2869
}
2870
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2871
2872
/*********************************************************************
2873
* REGISTER / UNREGISTER CPUFREQ DRIVER *
2874
*********************************************************************/
2875
static enum cpuhp_state hp_online;
2876
2877
static int cpuhp_cpufreq_online(unsigned int cpu)
2878
{
2879
cpufreq_online(cpu);
2880
2881
return 0;
2882
}
2883
2884
static int cpuhp_cpufreq_offline(unsigned int cpu)
2885
{
2886
cpufreq_offline(cpu);
2887
2888
return 0;
2889
}
2890
2891
/**
2892
* cpufreq_register_driver - register a CPU Frequency driver
2893
* @driver_data: A struct cpufreq_driver containing the values
2894
* submitted by the CPU Frequency driver.
2895
*
2896
* Registers a CPU Frequency driver to this core code. This code
2897
* returns zero on success, -EEXIST when another driver got here first
2898
* (and isn't unregistered in the meantime).
2899
*
2900
*/
2901
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2902
{
2903
unsigned long flags;
2904
int ret;
2905
2906
if (cpufreq_disabled())
2907
return -ENODEV;
2908
2909
/*
2910
* The cpufreq core depends heavily on the availability of device
2911
* structures, so make sure they are available before proceeding further.
2912
*/
2913
if (!get_cpu_device(0))
2914
return -EPROBE_DEFER;
2915
2916
if (!driver_data || !driver_data->verify || !driver_data->init ||
2917
(driver_data->target_index && driver_data->target) ||
2918
(!!driver_data->setpolicy == (driver_data->target_index || driver_data->target)) ||
2919
(!driver_data->get_intermediate != !driver_data->target_intermediate) ||
2920
(!driver_data->online != !driver_data->offline) ||
2921
(driver_data->adjust_perf && !driver_data->fast_switch))
2922
return -EINVAL;
2923
2924
pr_debug("trying to register driver %s\n", driver_data->name);
2925
2926
/* Protect against concurrent CPU online/offline. */
2927
cpus_read_lock();
2928
2929
write_lock_irqsave(&cpufreq_driver_lock, flags);
2930
if (cpufreq_driver) {
2931
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2932
ret = -EEXIST;
2933
goto out;
2934
}
2935
cpufreq_driver = driver_data;
2936
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2937
2938
if (driver_data->setpolicy)
2939
driver_data->flags |= CPUFREQ_CONST_LOOPS;
2940
2941
if (cpufreq_boost_supported()) {
2942
ret = create_boost_sysfs_file();
2943
if (ret)
2944
goto err_null_driver;
2945
}
2946
2947
/*
2948
* Mark support for the scheduler's frequency invariance engine for
2949
* drivers that implement target(), target_index() or fast_switch().
2950
*/
2951
if (!cpufreq_driver->setpolicy) {
2952
static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
2953
pr_debug("cpufreq: supports frequency invariance\n");
2954
}
2955
2956
ret = subsys_interface_register(&cpufreq_interface);
2957
if (ret)
2958
goto err_boost_unreg;
2959
2960
if (unlikely(list_empty(&cpufreq_policy_list))) {
2961
/* if all ->init() calls failed, unregister */
2962
ret = -ENODEV;
2963
pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2964
driver_data->name);
2965
goto err_if_unreg;
2966
}
2967
2968
ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
2969
"cpufreq:online",
2970
cpuhp_cpufreq_online,
2971
cpuhp_cpufreq_offline);
2972
if (ret < 0)
2973
goto err_if_unreg;
2974
hp_online = ret;
2975
ret = 0;
2976
2977
pr_debug("driver %s up and running\n", driver_data->name);
2978
goto out;
2979
2980
err_if_unreg:
2981
subsys_interface_unregister(&cpufreq_interface);
2982
err_boost_unreg:
2983
if (!cpufreq_driver->setpolicy)
2984
static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
2985
remove_boost_sysfs_file();
2986
err_null_driver:
2987
write_lock_irqsave(&cpufreq_driver_lock, flags);
2988
cpufreq_driver = NULL;
2989
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2990
out:
2991
cpus_read_unlock();
2992
return ret;
2993
}
2994
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2995
2996
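/*
 * Illustrative sketch (not part of this file): the minimal shape of a
 * frequency-table based driver that passes the sanity checks above (->init
 * and ->verify are mandatory, and exactly one of ->setpolicy or
 * ->target()/->target_index() must be provided). The "example_" names are
 * placeholders; cpufreq_generic_frequency_table_verify() is the stock
 * ->verify helper for table-based drivers.
 */
static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* Program the hardware to policy->freq_table[index].frequency. */
	return 0;
}

static int example_init(struct cpufreq_policy *policy)
{
	/* Fill in policy->freq_table, cpuinfo.transition_latency, etc. */
	return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	.init		= example_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
};

/*
 * module_init():  cpufreq_register_driver(&example_cpufreq_driver);
 * module_exit():  cpufreq_unregister_driver(&example_cpufreq_driver);
 */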
/*
2997
* cpufreq_unregister_driver - unregister the current CPUFreq driver
2998
*
2999
* Unregister the current CPUFreq driver. Only call this if you have
3000
* the right to do so, i.e. if you have succeeded in initialising before!
3001
* It simply warns and bails out if @driver is not the currently registered
3002
* cpufreq driver.
3003
*/
3004
void cpufreq_unregister_driver(struct cpufreq_driver *driver)
3005
{
3006
unsigned long flags;
3007
3008
if (WARN_ON(!cpufreq_driver || (driver != cpufreq_driver)))
3009
return;
3010
3011
pr_debug("unregistering driver %s\n", driver->name);
3012
3013
/* Protect against concurrent cpu hotplug */
3014
cpus_read_lock();
3015
subsys_interface_unregister(&cpufreq_interface);
3016
remove_boost_sysfs_file();
3017
static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
3018
cpuhp_remove_state_nocalls_cpuslocked(hp_online);
3019
3020
write_lock_irqsave(&cpufreq_driver_lock, flags);
3021
3022
cpufreq_driver = NULL;
3023
3024
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
3025
cpus_read_unlock();
3026
}
3027
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
3028
3029
static int __init cpufreq_core_init(void)
3030
{
3031
struct cpufreq_governor *gov = cpufreq_default_governor();
3032
struct device *dev_root;
3033
3034
if (cpufreq_disabled())
3035
return -ENODEV;
3036
3037
dev_root = bus_get_dev_root(&cpu_subsys);
3038
if (dev_root) {
3039
cpufreq_global_kobject = kobject_create_and_add("cpufreq", &dev_root->kobj);
3040
put_device(dev_root);
3041
}
3042
BUG_ON(!cpufreq_global_kobject);
3043
3044
if (!strlen(default_governor))
3045
strscpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
3046
3047
return 0;
3048
}
3049
3050
static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
3051
{
3052
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
3053
if (!policy) {
3054
pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
3055
return false;
3056
}
3057
3058
return sugov_is_governor(policy);
3059
}
3060
3061
bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask)
3062
{
3063
unsigned int cpu;
3064
3065
/* Do not attempt EAS if schedutil is not being used. */
3066
for_each_cpu(cpu, cpu_mask) {
3067
if (!cpufreq_policy_is_good_for_eas(cpu)) {
3068
pr_debug("rd %*pbl: schedutil is mandatory for EAS\n",
3069
cpumask_pr_args(cpu_mask));
3070
return false;
3071
}
3072
}
3073
3074
return true;
3075
}
3076
3077
module_param(off, int, 0444);
3078
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
3079
core_initcall(cpufreq_core_init);
3080
3081