GitHub Repository: torvalds/linux
Path: blob/master/drivers/cpufreq/cpufreq.c
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* linux/drivers/cpufreq/cpufreq.c
4
*
5
* Copyright (C) 2001 Russell King
6
* (C) 2002 - 2003 Dominik Brodowski <[email protected]>
7
* (C) 2013 Viresh Kumar <[email protected]>
8
*
9
* Oct 2005 - Ashok Raj <[email protected]>
10
* Added handling for CPU hotplug
11
* Feb 2006 - Jacob Shin <[email protected]>
12
* Fix handling for CPU hotplug -- affected CPUs
13
*/
14
15
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17
#include <linux/cpu.h>
18
#include <linux/cpufreq.h>
19
#include <linux/cpu_cooling.h>
20
#include <linux/delay.h>
21
#include <linux/device.h>
22
#include <linux/init.h>
23
#include <linux/kernel_stat.h>
24
#include <linux/module.h>
25
#include <linux/mutex.h>
26
#include <linux/pm_qos.h>
27
#include <linux/slab.h>
28
#include <linux/string_choices.h>
29
#include <linux/suspend.h>
30
#include <linux/syscore_ops.h>
31
#include <linux/tick.h>
32
#include <linux/units.h>
33
#include <trace/events/power.h>
34
35
static LIST_HEAD(cpufreq_policy_list);
36
37
/* Macros to iterate over CPU policies */
38
#define for_each_suitable_policy(__policy, __active) \
39
list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
40
if ((__active) == !policy_is_inactive(__policy))
41
42
#define for_each_active_policy(__policy) \
43
for_each_suitable_policy(__policy, true)
44
#define for_each_inactive_policy(__policy) \
45
for_each_suitable_policy(__policy, false)
46
47
/* Iterate over governors */
48
static LIST_HEAD(cpufreq_governor_list);
49
#define for_each_governor(__governor) \
50
list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
51
52
static char default_governor[CPUFREQ_NAME_LEN];
53
54
/*
55
 * The "cpufreq driver" - the arch- or hardware-dependent low-level
56
 * driver of CPUFreq support, and the rwlock protecting it. This lock
57
 * also protects the cpufreq_cpu_data array.
58
*/
59
static struct cpufreq_driver *cpufreq_driver;
60
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
61
static DEFINE_RWLOCK(cpufreq_driver_lock);
62
63
static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
64
bool cpufreq_supports_freq_invariance(void)
65
{
66
return static_branch_likely(&cpufreq_freq_invariance);
67
}
68
69
/* Flag to suspend/resume CPUFreq governors */
70
static bool cpufreq_suspended;
71
72
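/* True if the driver can set specific frequencies via ->target() or ->target_index(). */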
static inline bool has_target(void)
73
{
74
return cpufreq_driver->target_index || cpufreq_driver->target;
75
}
76
77
bool has_target_index(void)
78
{
79
return !!cpufreq_driver->target_index;
80
}
81
82
/* internal prototypes */
83
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
84
static int cpufreq_init_governor(struct cpufreq_policy *policy);
85
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
86
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
87
static int cpufreq_set_policy(struct cpufreq_policy *policy,
88
struct cpufreq_governor *new_gov,
89
unsigned int new_pol);
90
static bool cpufreq_boost_supported(void);
91
static int cpufreq_boost_trigger_state(int state);
92
93
/*
94
* Two notifier lists: the "policy" list is involved in the
95
* validation process for a new CPU frequency policy; the
96
* "transition" list for kernel code that needs to handle
97
* changes to devices when the CPU clock speed changes.
98
* The mutex locks both lists.
99
*/
100
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
101
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
102
103
static int off __read_mostly;
104
static int cpufreq_disabled(void)
105
{
106
return off;
107
}
108
void disable_cpufreq(void)
109
{
110
off = 1;
111
}
112
EXPORT_SYMBOL_GPL(disable_cpufreq);
113
114
static DEFINE_MUTEX(cpufreq_governor_mutex);
115
116
bool have_governor_per_policy(void)
117
{
118
return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
119
}
120
EXPORT_SYMBOL_GPL(have_governor_per_policy);
121
122
static struct kobject *cpufreq_global_kobject;
123
124
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
125
{
126
if (have_governor_per_policy())
127
return &policy->kobj;
128
else
129
return cpufreq_global_kobject;
130
}
131
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
132
133
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
134
{
135
struct kernel_cpustat kcpustat;
136
u64 cur_wall_time;
137
u64 idle_time;
138
u64 busy_time;
139
140
cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
141
142
kcpustat_cpu_fetch(&kcpustat, cpu);
143
144
busy_time = kcpustat.cpustat[CPUTIME_USER];
145
busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
146
busy_time += kcpustat.cpustat[CPUTIME_IRQ];
147
busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
148
busy_time += kcpustat.cpustat[CPUTIME_STEAL];
149
busy_time += kcpustat.cpustat[CPUTIME_NICE];
150
151
idle_time = cur_wall_time - busy_time;
152
if (wall)
153
*wall = div_u64(cur_wall_time, NSEC_PER_USEC);
154
155
return div_u64(idle_time, NSEC_PER_USEC);
156
}
157
158
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
159
{
160
u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
161
162
if (idle_time == -1ULL)
163
return get_cpu_idle_time_jiffy(cpu, wall);
164
else if (!io_busy)
165
idle_time += get_cpu_iowait_time_us(cpu, wall);
166
167
return idle_time;
168
}
169
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
170
171
/*
172
* This is a generic cpufreq init() routine which can be used by cpufreq
173
 * drivers of SMP systems. It will do the following:
174
 * - validate & show the freq table passed
175
 * - set the policy's transition latency
176
 * - fill policy->cpus with all possible CPUs
177
*/
178
void cpufreq_generic_init(struct cpufreq_policy *policy,
179
struct cpufreq_frequency_table *table,
180
unsigned int transition_latency)
181
{
182
policy->freq_table = table;
183
policy->cpuinfo.transition_latency = transition_latency;
184
185
/*
186
* The driver only supports the SMP configuration where all processors
187
 * share the clock and voltage.
188
*/
189
cpumask_setall(policy->cpus);
190
}
191
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
192
193
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
194
{
195
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
196
197
return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
198
}
199
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
200
201
unsigned int cpufreq_generic_get(unsigned int cpu)
202
{
203
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
204
205
if (!policy || IS_ERR(policy->clk)) {
206
pr_err("%s: No %s associated to cpu: %d\n",
207
__func__, policy ? "clk" : "policy", cpu);
208
return 0;
209
}
210
211
return clk_get_rate(policy->clk) / 1000;
212
}
213
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
214
215
/**
216
* cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
217
* @cpu: CPU to find the policy for.
218
*
219
* Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
220
* the kobject reference counter of that policy. Return a valid policy on
221
* success or NULL on failure.
222
*
223
* The policy returned by this function has to be released with the help of
224
* cpufreq_cpu_put() to balance its kobject reference counter properly.
225
*/
226
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
227
{
228
struct cpufreq_policy *policy = NULL;
229
unsigned long flags;
230
231
if (WARN_ON(cpu >= nr_cpu_ids))
232
return NULL;
233
234
/* get the cpufreq driver */
235
read_lock_irqsave(&cpufreq_driver_lock, flags);
236
237
if (cpufreq_driver) {
238
/* get the CPU */
239
policy = cpufreq_cpu_get_raw(cpu);
240
if (policy)
241
kobject_get(&policy->kobj);
242
}
243
244
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
245
246
return policy;
247
}
248
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
249
250
/**
251
* cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
252
* @policy: cpufreq policy returned by cpufreq_cpu_get().
253
*/
254
void cpufreq_cpu_put(struct cpufreq_policy *policy)
255
{
256
kobject_put(&policy->kobj);
257
}
258
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
259
260
/*********************************************************************
261
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
262
*********************************************************************/
263
264
/**
265
* adjust_jiffies - Adjust the system "loops_per_jiffy".
266
* @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
267
* @ci: Frequency change information.
268
*
269
* This function alters the system "loops_per_jiffy" for the clock
270
* speed change. Note that loops_per_jiffy cannot be updated on SMP
271
* systems as each CPU might be scaled differently. So, use the arch
272
* per-CPU loops_per_jiffy value wherever possible.
273
*/
274
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
275
{
276
#ifndef CONFIG_SMP
277
static unsigned long l_p_j_ref;
278
static unsigned int l_p_j_ref_freq;
279
280
if (ci->flags & CPUFREQ_CONST_LOOPS)
281
return;
282
283
if (!l_p_j_ref_freq) {
284
l_p_j_ref = loops_per_jiffy;
285
l_p_j_ref_freq = ci->old;
286
pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
287
l_p_j_ref, l_p_j_ref_freq);
288
}
289
if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
290
loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
291
ci->new);
292
pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
293
loops_per_jiffy, ci->new);
294
}
295
#endif
296
}
297
298
/**
299
* cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
300
 * @policy: cpufreq policy the frequency transition applies to.
301
 * @freqs: contains details of the frequency update.
302
* @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
303
*
304
* This function calls the transition notifiers and adjust_jiffies().
305
*
306
* It is called twice on all CPU frequency changes that have external effects.
307
*/
308
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
309
struct cpufreq_freqs *freqs,
310
unsigned int state)
311
{
312
int cpu;
313
314
BUG_ON(irqs_disabled());
315
316
if (cpufreq_disabled())
317
return;
318
319
freqs->policy = policy;
320
freqs->flags = cpufreq_driver->flags;
321
pr_debug("notification %u of frequency transition to %u kHz\n",
322
state, freqs->new);
323
324
switch (state) {
325
case CPUFREQ_PRECHANGE:
326
/*
327
* Detect if the driver reported a value as "old frequency"
328
* which is not equal to what the cpufreq core thinks is
329
* "old frequency".
330
*/
331
if (policy->cur && policy->cur != freqs->old) {
332
pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
333
freqs->old, policy->cur);
334
freqs->old = policy->cur;
335
}
336
337
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
338
CPUFREQ_PRECHANGE, freqs);
339
340
adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
341
break;
342
343
case CPUFREQ_POSTCHANGE:
344
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
345
pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
346
cpumask_pr_args(policy->cpus));
347
348
for_each_cpu(cpu, policy->cpus)
349
trace_cpu_frequency(freqs->new, cpu);
350
351
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
352
CPUFREQ_POSTCHANGE, freqs);
353
354
cpufreq_stats_record_transition(policy, freqs->new);
355
policy->cur = freqs->new;
356
}
357
}
358
359
/* Do post notifications when there is a chance that the transition has failed */
360
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
361
struct cpufreq_freqs *freqs, int transition_failed)
362
{
363
cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
364
if (!transition_failed)
365
return;
366
367
swap(freqs->old, freqs->new);
368
cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
369
cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
370
}
371
372
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
373
struct cpufreq_freqs *freqs)
374
{
375
376
/*
377
* Catch double invocations of _begin() which lead to self-deadlock.
378
* ASYNC_NOTIFICATION drivers are left out because the cpufreq core
379
* doesn't invoke _begin() on their behalf, and hence the chances of
380
* double invocations are very low. Moreover, there are scenarios
381
* where these checks can emit false-positive warnings in these
382
* drivers; so we avoid that by skipping them altogether.
383
*/
384
WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
385
&& current == policy->transition_task);
386
387
wait:
388
wait_event(policy->transition_wait, !policy->transition_ongoing);
389
390
spin_lock(&policy->transition_lock);
391
392
if (unlikely(policy->transition_ongoing)) {
393
spin_unlock(&policy->transition_lock);
394
goto wait;
395
}
396
397
policy->transition_ongoing = true;
398
policy->transition_task = current;
399
400
spin_unlock(&policy->transition_lock);
401
402
cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
403
}
404
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
405
406
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
407
struct cpufreq_freqs *freqs, int transition_failed)
408
{
409
if (WARN_ON(!policy->transition_ongoing))
410
return;
411
412
cpufreq_notify_post_transition(policy, freqs, transition_failed);
413
414
arch_set_freq_scale(policy->related_cpus,
415
policy->cur,
416
arch_scale_freq_ref(policy->cpu));
417
418
spin_lock(&policy->transition_lock);
419
policy->transition_ongoing = false;
420
policy->transition_task = NULL;
421
spin_unlock(&policy->transition_lock);
422
423
wake_up(&policy->transition_wait);
424
}
425
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
426
427
/*
428
* Fast frequency switching status count. Positive means "enabled", negative
429
* means "disabled" and 0 means "not decided yet".
430
*/
431
static int cpufreq_fast_switch_count;
432
static DEFINE_MUTEX(cpufreq_fast_switch_lock);
433
434
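/* Log every callback currently registered on the transition notifier chain. */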
static void cpufreq_list_transition_notifiers(void)
435
{
436
struct notifier_block *nb;
437
438
pr_info("Registered transition notifiers:\n");
439
440
mutex_lock(&cpufreq_transition_notifier_list.mutex);
441
442
for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
443
pr_info("%pS\n", nb->notifier_call);
444
445
mutex_unlock(&cpufreq_transition_notifier_list.mutex);
446
}
447
448
/**
449
* cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
450
* @policy: cpufreq policy to enable fast frequency switching for.
451
*
452
* Try to enable fast frequency switching for @policy.
453
*
454
* The attempt will fail if there is at least one transition notifier registered
455
* at this point, as fast frequency switching is quite fundamentally at odds
456
* with transition notifiers. Thus if successful, it will make registration of
457
* transition notifiers fail going forward.
458
*/
459
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
460
{
461
lockdep_assert_held(&policy->rwsem);
462
463
if (!policy->fast_switch_possible)
464
return;
465
466
mutex_lock(&cpufreq_fast_switch_lock);
467
if (cpufreq_fast_switch_count >= 0) {
468
cpufreq_fast_switch_count++;
469
policy->fast_switch_enabled = true;
470
} else {
471
pr_warn("CPU%u: Fast frequency switching not enabled\n",
472
policy->cpu);
473
cpufreq_list_transition_notifiers();
474
}
475
mutex_unlock(&cpufreq_fast_switch_lock);
476
}
477
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
478
479
/**
480
* cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
481
* @policy: cpufreq policy to disable fast frequency switching for.
482
*/
483
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
484
{
485
mutex_lock(&cpufreq_fast_switch_lock);
486
if (policy->fast_switch_enabled) {
487
policy->fast_switch_enabled = false;
488
if (!WARN_ON(cpufreq_fast_switch_count <= 0))
489
cpufreq_fast_switch_count--;
490
}
491
mutex_unlock(&cpufreq_fast_switch_lock);
492
}
493
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
494
495
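/*
 * Clamp @target_freq to the [@min, @max] range and, if a frequency table is
 * present, map it to a table entry according to @relation, caching the result.
 */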
static unsigned int __resolve_freq(struct cpufreq_policy *policy,
496
unsigned int target_freq,
497
unsigned int min, unsigned int max,
498
unsigned int relation)
499
{
500
unsigned int idx;
501
502
target_freq = clamp_val(target_freq, min, max);
503
504
if (!policy->freq_table)
505
return target_freq;
506
507
idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
508
policy->cached_resolved_idx = idx;
509
policy->cached_target_freq = target_freq;
510
return policy->freq_table[idx].frequency;
511
}
512
513
/**
514
* cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
515
* one.
516
* @policy: associated policy to interrogate
517
* @target_freq: target frequency to resolve.
518
*
519
* The target to driver frequency mapping is cached in the policy.
520
*
521
* Return: Lowest driver-supported frequency greater than or equal to the
522
* given target_freq, subject to policy (min/max) and driver limitations.
523
*/
524
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
525
unsigned int target_freq)
526
{
527
unsigned int min = READ_ONCE(policy->min);
528
unsigned int max = READ_ONCE(policy->max);
529
530
/*
531
* If this function runs in parallel with cpufreq_set_policy(), it may
532
* read policy->min before the update and policy->max after the update
533
* or the other way around, so there is no ordering guarantee.
534
*
535
* Resolve this by always honoring the max (in case it comes from
536
* thermal throttling or similar).
537
*/
538
if (unlikely(min > max))
539
min = max;
540
541
return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
542
}
543
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
544
545
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
546
{
547
unsigned int latency;
548
549
if (policy->transition_delay_us)
550
return policy->transition_delay_us;
551
552
latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
553
if (latency)
554
/* Give a 50% breathing room between updates */
555
return latency + (latency >> 1);
556
557
return USEC_PER_MSEC;
558
}
559
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
560
561
/*********************************************************************
562
* SYSFS INTERFACE *
563
*********************************************************************/
564
static ssize_t show_boost(struct kobject *kobj,
565
struct kobj_attribute *attr, char *buf)
566
{
567
return sysfs_emit(buf, "%d\n", cpufreq_driver->boost_enabled);
568
}
569
570
static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
571
const char *buf, size_t count)
572
{
573
bool enable;
574
575
if (kstrtobool(buf, &enable))
576
return -EINVAL;
577
578
if (cpufreq_boost_trigger_state(enable)) {
579
pr_err("%s: Cannot %s BOOST!\n",
580
__func__, str_enable_disable(enable));
581
return -EINVAL;
582
}
583
584
pr_debug("%s: cpufreq BOOST %s\n",
585
__func__, str_enabled_disabled(enable));
586
587
return count;
588
}
589
define_one_global_rw(boost);
590
591
static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
592
{
593
return sysfs_emit(buf, "%d\n", policy->boost_enabled);
594
}
595
596
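/* Update the per-policy boost flag and propagate it through the driver's ->set_boost(). */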
static int policy_set_boost(struct cpufreq_policy *policy, bool enable)
597
{
598
int ret;
599
600
if (policy->boost_enabled == enable)
601
return 0;
602
603
policy->boost_enabled = enable;
604
605
ret = cpufreq_driver->set_boost(policy, enable);
606
if (ret)
607
policy->boost_enabled = !policy->boost_enabled;
608
609
return ret;
610
}
611
612
static ssize_t store_local_boost(struct cpufreq_policy *policy,
613
const char *buf, size_t count)
614
{
615
int ret;
616
bool enable;
617
618
if (kstrtobool(buf, &enable))
619
return -EINVAL;
620
621
if (!cpufreq_driver->boost_enabled)
622
return -EINVAL;
623
624
if (!policy->boost_supported)
625
return -EINVAL;
626
627
ret = policy_set_boost(policy, enable);
628
if (!ret)
629
return count;
630
631
return ret;
632
}
633
634
static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);
635
636
static struct cpufreq_governor *find_governor(const char *str_governor)
637
{
638
struct cpufreq_governor *t;
639
640
for_each_governor(t)
641
if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
642
return t;
643
644
return NULL;
645
}
646
647
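/* Like find_governor(), but also take a reference on the governor's module. */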
static struct cpufreq_governor *get_governor(const char *str_governor)
648
{
649
struct cpufreq_governor *t;
650
651
mutex_lock(&cpufreq_governor_mutex);
652
t = find_governor(str_governor);
653
if (!t)
654
goto unlock;
655
656
if (!try_module_get(t->owner))
657
t = NULL;
658
659
unlock:
660
mutex_unlock(&cpufreq_governor_mutex);
661
662
return t;
663
}
664
665
static unsigned int cpufreq_parse_policy(char *str_governor)
666
{
667
if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
668
return CPUFREQ_POLICY_PERFORMANCE;
669
670
if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
671
return CPUFREQ_POLICY_POWERSAVE;
672
673
return CPUFREQ_POLICY_UNKNOWN;
674
}
675
676
/**
677
* cpufreq_parse_governor - parse a governor string only for has_target()
678
* @str_governor: Governor name.
679
*/
680
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
681
{
682
struct cpufreq_governor *t;
683
684
t = get_governor(str_governor);
685
if (t)
686
return t;
687
688
if (request_module("cpufreq_%s", str_governor))
689
return NULL;
690
691
return get_governor(str_governor);
692
}
693
694
/*
695
* cpufreq_per_cpu_attr_read() / show_##file_name() -
696
* print out cpufreq information
697
*
698
* Write out information from cpufreq_driver->policy[cpu]; object must be
699
* "unsigned int".
700
*/
701
702
#define show_one(file_name, object) \
703
static ssize_t show_##file_name \
704
(struct cpufreq_policy *policy, char *buf) \
705
{ \
706
return sysfs_emit(buf, "%u\n", policy->object); \
707
}
708
709
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
710
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
711
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
712
show_one(scaling_min_freq, min);
713
show_one(scaling_max_freq, max);
714
715
__weak int arch_freq_get_on_cpu(int cpu)
716
{
717
return -EOPNOTSUPP;
718
}
719
720
static inline bool cpufreq_avg_freq_supported(struct cpufreq_policy *policy)
721
{
722
return arch_freq_get_on_cpu(policy->cpu) != -EOPNOTSUPP;
723
}
724
725
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
726
{
727
ssize_t ret;
728
int freq;
729
730
freq = IS_ENABLED(CONFIG_CPUFREQ_ARCH_CUR_FREQ)
731
? arch_freq_get_on_cpu(policy->cpu)
732
: 0;
733
734
if (freq > 0)
735
ret = sysfs_emit(buf, "%u\n", freq);
736
else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
737
ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
738
else
739
ret = sysfs_emit(buf, "%u\n", policy->cur);
740
return ret;
741
}
742
743
/*
744
* cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
745
*/
746
#define store_one(file_name, object) \
747
static ssize_t store_##file_name \
748
(struct cpufreq_policy *policy, const char *buf, size_t count) \
749
{ \
750
unsigned long val; \
751
int ret; \
752
\
753
ret = kstrtoul(buf, 0, &val); \
754
if (ret) \
755
return ret; \
756
\
757
ret = freq_qos_update_request(policy->object##_freq_req, val);\
758
return ret >= 0 ? count : ret; \
759
}
760
761
store_one(scaling_min_freq, min);
762
store_one(scaling_max_freq, max);
763
764
/*
765
* show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
766
*/
767
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
768
char *buf)
769
{
770
unsigned int cur_freq = __cpufreq_get(policy);
771
772
if (cur_freq)
773
return sysfs_emit(buf, "%u\n", cur_freq);
774
775
return sysfs_emit(buf, "<unknown>\n");
776
}
777
778
/*
779
* show_cpuinfo_avg_freq - average CPU frequency as detected by hardware
780
*/
781
static ssize_t show_cpuinfo_avg_freq(struct cpufreq_policy *policy,
782
char *buf)
783
{
784
int avg_freq = arch_freq_get_on_cpu(policy->cpu);
785
786
if (avg_freq > 0)
787
return sysfs_emit(buf, "%u\n", avg_freq);
788
return avg_freq != 0 ? avg_freq : -EINVAL;
789
}
790
791
/*
792
* show_scaling_governor - show the current policy for the specified CPU
793
*/
794
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
795
{
796
if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
797
return sysfs_emit(buf, "powersave\n");
798
else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
799
return sysfs_emit(buf, "performance\n");
800
else if (policy->governor)
801
return sysfs_emit(buf, "%s\n", policy->governor->name);
802
return -EINVAL;
803
}
804
805
/*
806
* store_scaling_governor - store policy for the specified CPU
807
*/
808
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
809
const char *buf, size_t count)
810
{
811
char str_governor[CPUFREQ_NAME_LEN];
812
int ret;
813
814
ret = sscanf(buf, "%15s", str_governor);
815
if (ret != 1)
816
return -EINVAL;
817
818
if (cpufreq_driver->setpolicy) {
819
unsigned int new_pol;
820
821
new_pol = cpufreq_parse_policy(str_governor);
822
if (!new_pol)
823
return -EINVAL;
824
825
ret = cpufreq_set_policy(policy, NULL, new_pol);
826
} else {
827
struct cpufreq_governor *new_gov;
828
829
new_gov = cpufreq_parse_governor(str_governor);
830
if (!new_gov)
831
return -EINVAL;
832
833
ret = cpufreq_set_policy(policy, new_gov,
834
CPUFREQ_POLICY_UNKNOWN);
835
836
module_put(new_gov->owner);
837
}
838
839
return ret ? ret : count;
840
}
841
842
/*
843
* show_scaling_driver - show the cpufreq driver currently loaded
844
*/
845
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
846
{
847
return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
848
}
849
850
/*
851
* show_scaling_available_governors - show the available CPUfreq governors
852
*/
853
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
854
char *buf)
855
{
856
ssize_t i = 0;
857
struct cpufreq_governor *t;
858
859
if (!has_target()) {
860
i += sysfs_emit(buf, "performance powersave");
861
goto out;
862
}
863
864
mutex_lock(&cpufreq_governor_mutex);
865
for_each_governor(t) {
866
if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
867
- (CPUFREQ_NAME_LEN + 2)))
868
break;
869
i += sysfs_emit_at(buf, i, "%s ", t->name);
870
}
871
mutex_unlock(&cpufreq_governor_mutex);
872
out:
873
i += sysfs_emit_at(buf, i, "\n");
874
return i;
875
}
876
877
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
878
{
879
ssize_t i = 0;
880
unsigned int cpu;
881
882
for_each_cpu(cpu, mask) {
883
i += sysfs_emit_at(buf, i, "%u ", cpu);
884
if (i >= (PAGE_SIZE - 5))
885
break;
886
}
887
888
/* Remove the extra space at the end */
889
i--;
890
891
i += sysfs_emit_at(buf, i, "\n");
892
return i;
893
}
894
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
895
896
/*
897
* show_related_cpus - show the CPUs affected by each transition even if
898
* hw coordination is in use
899
*/
900
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
901
{
902
return cpufreq_show_cpus(policy->related_cpus, buf);
903
}
904
905
/*
906
* show_affected_cpus - show the CPUs affected by each transition
907
*/
908
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
909
{
910
return cpufreq_show_cpus(policy->cpus, buf);
911
}
912
913
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
914
const char *buf, size_t count)
915
{
916
unsigned int freq = 0;
917
unsigned int ret;
918
919
if (!policy->governor || !policy->governor->store_setspeed)
920
return -EINVAL;
921
922
ret = kstrtouint(buf, 0, &freq);
923
if (ret)
924
return ret;
925
926
policy->governor->store_setspeed(policy, freq);
927
928
return count;
929
}
930
931
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
932
{
933
if (!policy->governor || !policy->governor->show_setspeed)
934
return sysfs_emit(buf, "<unsupported>\n");
935
936
return policy->governor->show_setspeed(policy, buf);
937
}
938
939
/*
940
* show_bios_limit - show the current cpufreq HW/BIOS limitation
941
*/
942
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
943
{
944
unsigned int limit;
945
int ret;
946
ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
947
if (!ret)
948
return sysfs_emit(buf, "%u\n", limit);
949
return sysfs_emit(buf, "%u\n", policy->cpuinfo.max_freq);
950
}
951
952
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
953
cpufreq_freq_attr_ro(cpuinfo_avg_freq);
954
cpufreq_freq_attr_ro(cpuinfo_min_freq);
955
cpufreq_freq_attr_ro(cpuinfo_max_freq);
956
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
957
cpufreq_freq_attr_ro(scaling_available_governors);
958
cpufreq_freq_attr_ro(scaling_driver);
959
cpufreq_freq_attr_ro(scaling_cur_freq);
960
cpufreq_freq_attr_ro(bios_limit);
961
cpufreq_freq_attr_ro(related_cpus);
962
cpufreq_freq_attr_ro(affected_cpus);
963
cpufreq_freq_attr_rw(scaling_min_freq);
964
cpufreq_freq_attr_rw(scaling_max_freq);
965
cpufreq_freq_attr_rw(scaling_governor);
966
cpufreq_freq_attr_rw(scaling_setspeed);
967
968
static struct attribute *cpufreq_attrs[] = {
969
&cpuinfo_min_freq.attr,
970
&cpuinfo_max_freq.attr,
971
&cpuinfo_transition_latency.attr,
972
&scaling_cur_freq.attr,
973
&scaling_min_freq.attr,
974
&scaling_max_freq.attr,
975
&affected_cpus.attr,
976
&related_cpus.attr,
977
&scaling_governor.attr,
978
&scaling_driver.attr,
979
&scaling_available_governors.attr,
980
&scaling_setspeed.attr,
981
NULL
982
};
983
ATTRIBUTE_GROUPS(cpufreq);
984
985
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
986
#define to_attr(a) container_of(a, struct freq_attr, attr)
987
988
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
989
{
990
struct cpufreq_policy *policy = to_policy(kobj);
991
struct freq_attr *fattr = to_attr(attr);
992
993
if (!fattr->show)
994
return -EIO;
995
996
guard(cpufreq_policy_read)(policy);
997
998
if (likely(!policy_is_inactive(policy)))
999
return fattr->show(policy, buf);
1000
1001
return -EBUSY;
1002
}
1003
1004
static ssize_t store(struct kobject *kobj, struct attribute *attr,
1005
const char *buf, size_t count)
1006
{
1007
struct cpufreq_policy *policy = to_policy(kobj);
1008
struct freq_attr *fattr = to_attr(attr);
1009
1010
if (!fattr->store)
1011
return -EIO;
1012
1013
guard(cpufreq_policy_write)(policy);
1014
1015
if (likely(!policy_is_inactive(policy)))
1016
return fattr->store(policy, buf, count);
1017
1018
return -EBUSY;
1019
}
1020
1021
static void cpufreq_sysfs_release(struct kobject *kobj)
1022
{
1023
struct cpufreq_policy *policy = to_policy(kobj);
1024
pr_debug("last reference is dropped\n");
1025
complete(&policy->kobj_unregister);
1026
}
1027
1028
static const struct sysfs_ops sysfs_ops = {
1029
.show = show,
1030
.store = store,
1031
};
1032
1033
static const struct kobj_type ktype_cpufreq = {
1034
.sysfs_ops = &sysfs_ops,
1035
.default_groups = cpufreq_groups,
1036
.release = cpufreq_sysfs_release,
1037
};
1038
1039
static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
1040
struct device *dev)
1041
{
1042
if (unlikely(!dev))
1043
return;
1044
1045
if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
1046
return;
1047
1048
dev_dbg(dev, "%s: Adding symlink\n", __func__);
1049
if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
1050
dev_err(dev, "cpufreq symlink creation failed\n");
1051
}
1052
1053
static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu,
1054
struct device *dev)
1055
{
1056
dev_dbg(dev, "%s: Removing symlink\n", __func__);
1057
sysfs_remove_link(&dev->kobj, "cpufreq");
1058
cpumask_clear_cpu(cpu, policy->real_cpus);
1059
}
1060
1061
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1062
{
1063
struct freq_attr **drv_attr;
1064
int ret = 0;
1065
1066
/* Attributes that need freq_table */
1067
if (policy->freq_table) {
1068
ret = sysfs_create_file(&policy->kobj,
1069
&cpufreq_freq_attr_scaling_available_freqs.attr);
1070
if (ret)
1071
return ret;
1072
1073
if (cpufreq_boost_supported()) {
1074
ret = sysfs_create_file(&policy->kobj,
1075
&cpufreq_freq_attr_scaling_boost_freqs.attr);
1076
if (ret)
1077
return ret;
1078
}
1079
}
1080
1081
/* set up files for this cpu device */
1082
drv_attr = cpufreq_driver->attr;
1083
while (drv_attr && *drv_attr) {
1084
ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1085
if (ret)
1086
return ret;
1087
drv_attr++;
1088
}
1089
if (cpufreq_driver->get) {
1090
ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1091
if (ret)
1092
return ret;
1093
}
1094
1095
if (cpufreq_avg_freq_supported(policy)) {
1096
ret = sysfs_create_file(&policy->kobj, &cpuinfo_avg_freq.attr);
1097
if (ret)
1098
return ret;
1099
}
1100
1101
if (cpufreq_driver->bios_limit) {
1102
ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1103
if (ret)
1104
return ret;
1105
}
1106
1107
if (cpufreq_boost_supported()) {
1108
ret = sysfs_create_file(&policy->kobj, &local_boost.attr);
1109
if (ret)
1110
return ret;
1111
}
1112
1113
return 0;
1114
}
1115
1116
static int cpufreq_init_policy(struct cpufreq_policy *policy)
1117
{
1118
struct cpufreq_governor *gov = NULL;
1119
unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
1120
int ret;
1121
1122
if (has_target()) {
1123
/* Update policy governor to the one used before hotplug. */
1124
gov = get_governor(policy->last_governor);
1125
if (gov) {
1126
pr_debug("Restoring governor %s for cpu %d\n",
1127
gov->name, policy->cpu);
1128
} else {
1129
gov = get_governor(default_governor);
1130
}
1131
1132
if (!gov) {
1133
gov = cpufreq_default_governor();
1134
__module_get(gov->owner);
1135
}
1136
1137
} else {
1138
1139
/* Use the default policy if there is no last_policy. */
1140
if (policy->last_policy) {
1141
pol = policy->last_policy;
1142
} else {
1143
pol = cpufreq_parse_policy(default_governor);
1144
/*
1145
* In case the default governor is neither "performance"
1146
* nor "powersave", fall back to the initial policy
1147
* value set by the driver.
1148
*/
1149
if (pol == CPUFREQ_POLICY_UNKNOWN)
1150
pol = policy->policy;
1151
}
1152
if (pol != CPUFREQ_POLICY_PERFORMANCE &&
1153
pol != CPUFREQ_POLICY_POWERSAVE)
1154
return -ENODATA;
1155
}
1156
1157
ret = cpufreq_set_policy(policy, gov, pol);
1158
if (gov)
1159
module_put(gov->owner);
1160
1161
return ret;
1162
}
1163
1164
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1165
{
1166
int ret = 0;
1167
1168
/* Has this CPU been taken care of already? */
1169
if (cpumask_test_cpu(cpu, policy->cpus))
1170
return 0;
1171
1172
guard(cpufreq_policy_write)(policy);
1173
1174
if (has_target())
1175
cpufreq_stop_governor(policy);
1176
1177
cpumask_set_cpu(cpu, policy->cpus);
1178
1179
if (has_target()) {
1180
ret = cpufreq_start_governor(policy);
1181
if (ret)
1182
pr_err("%s: Failed to start governor\n", __func__);
1183
}
1184
1185
return ret;
1186
}
1187
1188
void refresh_frequency_limits(struct cpufreq_policy *policy)
1189
{
1190
if (!policy_is_inactive(policy)) {
1191
pr_debug("updating policy for CPU %u\n", policy->cpu);
1192
1193
cpufreq_set_policy(policy, policy->governor, policy->policy);
1194
}
1195
}
1196
EXPORT_SYMBOL(refresh_frequency_limits);
1197
1198
static void handle_update(struct work_struct *work)
1199
{
1200
struct cpufreq_policy *policy =
1201
container_of(work, struct cpufreq_policy, update);
1202
1203
pr_debug("handle_update for cpu %u called\n", policy->cpu);
1204
1205
guard(cpufreq_policy_write)(policy);
1206
1207
refresh_frequency_limits(policy);
1208
}
1209
1210
static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
1211
void *data)
1212
{
1213
struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
1214
1215
schedule_work(&policy->update);
1216
return 0;
1217
}
1218
1219
static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
1220
void *data)
1221
{
1222
struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
1223
1224
schedule_work(&policy->update);
1225
return 0;
1226
}
1227
1228
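/* Drop the policy's kobject and wait for its last reference to go away. */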
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1229
{
1230
struct kobject *kobj;
1231
struct completion *cmp;
1232
1233
scoped_guard(cpufreq_policy_write, policy) {
1234
cpufreq_stats_free_table(policy);
1235
kobj = &policy->kobj;
1236
cmp = &policy->kobj_unregister;
1237
}
1238
kobject_put(kobj);
1239
1240
/*
1241
* We need to make sure that the underlying kobj is
1242
* actually not referenced anymore by anybody before we
1243
* proceed with unloading.
1244
*/
1245
pr_debug("waiting for dropping of refcount\n");
1246
wait_for_completion(cmp);
1247
pr_debug("wait complete\n");
1248
}
1249
1250
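/*
 * Allocate a policy together with its cpumasks and kobject, and register the
 * min/max frequency QoS notifiers that trigger policy updates.
 */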
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1251
{
1252
struct cpufreq_policy *policy;
1253
struct device *dev = get_cpu_device(cpu);
1254
int ret;
1255
1256
if (!dev)
1257
return NULL;
1258
1259
policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1260
if (!policy)
1261
return NULL;
1262
1263
if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1264
goto err_free_policy;
1265
1266
if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1267
goto err_free_cpumask;
1268
1269
if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1270
goto err_free_rcpumask;
1271
1272
init_completion(&policy->kobj_unregister);
1273
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1274
cpufreq_global_kobject, "policy%u", cpu);
1275
if (ret) {
1276
dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
1277
/*
1278
* The entire policy object will be freed below, but the extra
1279
* memory allocated for the kobject name needs to be freed by
1280
* releasing the kobject.
1281
*/
1282
kobject_put(&policy->kobj);
1283
goto err_free_real_cpus;
1284
}
1285
1286
init_rwsem(&policy->rwsem);
1287
1288
freq_constraints_init(&policy->constraints);
1289
1290
policy->nb_min.notifier_call = cpufreq_notifier_min;
1291
policy->nb_max.notifier_call = cpufreq_notifier_max;
1292
1293
ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
1294
&policy->nb_min);
1295
if (ret) {
1296
dev_err(dev, "Failed to register MIN QoS notifier: %d (CPU%u)\n",
1297
ret, cpu);
1298
goto err_kobj_remove;
1299
}
1300
1301
ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
1302
&policy->nb_max);
1303
if (ret) {
1304
dev_err(dev, "Failed to register MAX QoS notifier: %d (CPU%u)\n",
1305
ret, cpu);
1306
goto err_min_qos_notifier;
1307
}
1308
1309
INIT_LIST_HEAD(&policy->policy_list);
1310
spin_lock_init(&policy->transition_lock);
1311
init_waitqueue_head(&policy->transition_wait);
1312
INIT_WORK(&policy->update, handle_update);
1313
1314
return policy;
1315
1316
err_min_qos_notifier:
1317
freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1318
&policy->nb_min);
1319
err_kobj_remove:
1320
cpufreq_policy_put_kobj(policy);
1321
err_free_real_cpus:
1322
free_cpumask_var(policy->real_cpus);
1323
err_free_rcpumask:
1324
free_cpumask_var(policy->related_cpus);
1325
err_free_cpumask:
1326
free_cpumask_var(policy->cpus);
1327
err_free_policy:
1328
kfree(policy);
1329
1330
return NULL;
1331
}
1332
1333
static void cpufreq_policy_free(struct cpufreq_policy *policy)
1334
{
1335
unsigned long flags;
1336
int cpu;
1337
1338
/*
1339
* The callers must ensure the policy is inactive by now, to avoid any
1340
* races with show()/store() callbacks.
1341
*/
1342
if (unlikely(!policy_is_inactive(policy)))
1343
pr_warn("%s: Freeing active policy\n", __func__);
1344
1345
/* Remove policy from list */
1346
write_lock_irqsave(&cpufreq_driver_lock, flags);
1347
list_del(&policy->policy_list);
1348
1349
for_each_cpu(cpu, policy->related_cpus)
1350
per_cpu(cpufreq_cpu_data, cpu) = NULL;
1351
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1352
1353
freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
1354
&policy->nb_max);
1355
freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1356
&policy->nb_min);
1357
1358
/* Cancel any pending policy->update work before freeing the policy. */
1359
cancel_work_sync(&policy->update);
1360
1361
if (policy->max_freq_req) {
1362
/*
1363
* Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
1364
* notification, since CPUFREQ_CREATE_POLICY notification was
1365
* sent after adding max_freq_req earlier.
1366
*/
1367
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1368
CPUFREQ_REMOVE_POLICY, policy);
1369
freq_qos_remove_request(policy->max_freq_req);
1370
}
1371
1372
freq_qos_remove_request(policy->min_freq_req);
1373
kfree(policy->min_freq_req);
1374
1375
cpufreq_policy_put_kobj(policy);
1376
free_cpumask_var(policy->real_cpus);
1377
free_cpumask_var(policy->related_cpus);
1378
free_cpumask_var(policy->cpus);
1379
kfree(policy);
1380
}
1381
1382
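/*
 * Bring @policy online for @cpu: (re)initialize it through the driver, set up
 * the QoS requests and sysfs interface for a new policy, and apply the
 * starting policy/governor via cpufreq_init_policy().
 */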
static int cpufreq_policy_online(struct cpufreq_policy *policy,
1383
unsigned int cpu, bool new_policy)
1384
{
1385
unsigned long flags;
1386
unsigned int j;
1387
int ret;
1388
1389
guard(cpufreq_policy_write)(policy);
1390
1391
policy->cpu = cpu;
1392
policy->governor = NULL;
1393
1394
if (!new_policy && cpufreq_driver->online) {
1395
/* Recover policy->cpus using related_cpus */
1396
cpumask_copy(policy->cpus, policy->related_cpus);
1397
1398
ret = cpufreq_driver->online(policy);
1399
if (ret) {
1400
pr_debug("%s: %d: initialization failed\n", __func__,
1401
__LINE__);
1402
goto out_exit_policy;
1403
}
1404
} else {
1405
cpumask_copy(policy->cpus, cpumask_of(cpu));
1406
1407
/*
1408
* Call driver. From then on the cpufreq must be able
1409
* to accept all calls to ->verify and ->setpolicy for this CPU.
1410
*/
1411
ret = cpufreq_driver->init(policy);
1412
if (ret) {
1413
pr_debug("%s: %d: initialization failed\n", __func__,
1414
__LINE__);
1415
goto out_clear_policy;
1416
}
1417
1418
/*
1419
* The initialization has succeeded and the policy is online.
1420
* If there is a problem with its frequency table, take it
1421
* offline and drop it.
1422
*/
1423
ret = cpufreq_table_validate_and_sort(policy);
1424
if (ret)
1425
goto out_offline_policy;
1426
1427
/* related_cpus should at least include policy->cpus. */
1428
cpumask_copy(policy->related_cpus, policy->cpus);
1429
}
1430
1431
/*
1432
 * Affected cpus must always be the ones that are online. We aren't
1433
 * managing offline cpus here.
1434
*/
1435
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1436
1437
if (new_policy) {
1438
for_each_cpu(j, policy->related_cpus) {
1439
per_cpu(cpufreq_cpu_data, j) = policy;
1440
add_cpu_dev_symlink(policy, j, get_cpu_device(j));
1441
}
1442
1443
policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
1444
GFP_KERNEL);
1445
if (!policy->min_freq_req) {
1446
ret = -ENOMEM;
1447
goto out_destroy_policy;
1448
}
1449
1450
ret = freq_qos_add_request(&policy->constraints,
1451
policy->min_freq_req, FREQ_QOS_MIN,
1452
FREQ_QOS_MIN_DEFAULT_VALUE);
1453
if (ret < 0) {
1454
/*
1455
* So we don't call freq_qos_remove_request() for an
1456
* uninitialized request.
1457
*/
1458
kfree(policy->min_freq_req);
1459
policy->min_freq_req = NULL;
1460
goto out_destroy_policy;
1461
}
1462
1463
/*
1464
* This must be initialized right here to avoid calling
1465
* freq_qos_remove_request() on uninitialized request in case
1466
* of errors.
1467
*/
1468
policy->max_freq_req = policy->min_freq_req + 1;
1469
1470
ret = freq_qos_add_request(&policy->constraints,
1471
policy->max_freq_req, FREQ_QOS_MAX,
1472
FREQ_QOS_MAX_DEFAULT_VALUE);
1473
if (ret < 0) {
1474
policy->max_freq_req = NULL;
1475
goto out_destroy_policy;
1476
}
1477
1478
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1479
CPUFREQ_CREATE_POLICY, policy);
1480
} else {
1481
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
1482
if (ret < 0)
1483
goto out_destroy_policy;
1484
}
1485
1486
if (cpufreq_driver->get && has_target()) {
1487
policy->cur = cpufreq_driver->get(policy->cpu);
1488
if (!policy->cur) {
1489
ret = -EIO;
1490
pr_err("%s: ->get() failed\n", __func__);
1491
goto out_destroy_policy;
1492
}
1493
}
1494
1495
/*
1496
 * Sometimes boot loaders set the CPU frequency to a value outside of the
1497
 * frequency table known to the cpufreq core. In such cases the CPU might be
1498
 * unstable if it has to run at that frequency for a long time, so it is
1499
 * better to set it to a frequency which is specified in the freq-table.
1500
 * This also makes cpufreq stats inconsistent, as cpufreq-stats would fail
1501
 * to register because the current frequency of the CPU isn't found in the
1502
 * freq-table.
1503
 *
1504
 * Because we don't want this change to affect the boot process badly, we go
1505
* for the next freq which is >= policy->cur ('cur' must be set by now,
1506
* otherwise we will end up setting freq to lowest of the table as 'cur'
1507
* is initialized to zero).
1508
*
1509
* We are passing target-freq as "policy->cur - 1" otherwise
1510
* __cpufreq_driver_target() would simply fail, as policy->cur will be
1511
* equal to target-freq.
1512
*/
1513
if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1514
&& has_target()) {
1515
unsigned int old_freq = policy->cur;
1516
1517
/* Are we running at unknown frequency ? */
1518
ret = cpufreq_frequency_table_get_index(policy, old_freq);
1519
if (ret == -EINVAL) {
1520
ret = __cpufreq_driver_target(policy, old_freq - 1,
1521
CPUFREQ_RELATION_L);
1522
1523
/*
1524
* Reaching here after boot in a few seconds may not
1525
* mean that system will remain stable at "unknown"
1526
* frequency for longer duration. Hence, a BUG_ON().
1527
*/
1528
BUG_ON(ret);
1529
pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
1530
__func__, policy->cpu, old_freq, policy->cur);
1531
}
1532
}
1533
1534
if (new_policy) {
1535
ret = cpufreq_add_dev_interface(policy);
1536
if (ret)
1537
goto out_destroy_policy;
1538
1539
cpufreq_stats_create_table(policy);
1540
1541
write_lock_irqsave(&cpufreq_driver_lock, flags);
1542
list_add(&policy->policy_list, &cpufreq_policy_list);
1543
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1544
1545
/*
1546
* Register with the energy model before
1547
* em_rebuild_sched_domains() is called, which will result
1548
* in rebuilding of the sched domains, which should only be done
1549
* once the energy model is properly initialized for the policy
1550
* first.
1551
*
1552
* Also, this should be called before the policy is registered
1553
* with cooling framework.
1554
*/
1555
if (cpufreq_driver->register_em)
1556
cpufreq_driver->register_em(policy);
1557
}
1558
1559
ret = cpufreq_init_policy(policy);
1560
if (ret) {
1561
pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1562
__func__, cpu, ret);
1563
goto out_destroy_policy;
1564
}
1565
1566
return 0;
1567
1568
out_destroy_policy:
1569
for_each_cpu(j, policy->real_cpus)
1570
remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
1571
1572
out_offline_policy:
1573
if (cpufreq_driver->offline)
1574
cpufreq_driver->offline(policy);
1575
1576
out_exit_policy:
1577
if (cpufreq_driver->exit)
1578
cpufreq_driver->exit(policy);
1579
1580
out_clear_policy:
1581
cpumask_clear(policy->cpus);
1582
1583
return ret;
1584
}
1585
1586
static int cpufreq_online(unsigned int cpu)
1587
{
1588
struct cpufreq_policy *policy;
1589
bool new_policy;
1590
int ret;
1591
1592
pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1593
1594
/* Check if this CPU already has a policy to manage it */
1595
policy = per_cpu(cpufreq_cpu_data, cpu);
1596
if (policy) {
1597
WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1598
if (!policy_is_inactive(policy))
1599
return cpufreq_add_policy_cpu(policy, cpu);
1600
1601
/* This is the only online CPU for the policy. Start over. */
1602
new_policy = false;
1603
} else {
1604
new_policy = true;
1605
policy = cpufreq_policy_alloc(cpu);
1606
if (!policy)
1607
return -ENOMEM;
1608
}
1609
1610
ret = cpufreq_policy_online(policy, cpu, new_policy);
1611
if (ret) {
1612
cpufreq_policy_free(policy);
1613
return ret;
1614
}
1615
1616
kobject_uevent(&policy->kobj, KOBJ_ADD);
1617
1618
/* Callback for handling stuff after policy is ready */
1619
if (cpufreq_driver->ready)
1620
cpufreq_driver->ready(policy);
1621
1622
/* Register cpufreq cooling only for a new policy */
1623
if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
1624
policy->cdev = of_cpufreq_cooling_register(policy);
1625
1626
/*
1627
* Let the per-policy boost flag mirror the cpufreq_driver boost during
1628
* initialization for a new policy. For an existing policy, maintain the
1629
* previous boost value unless global boost is disabled.
1630
*/
1631
if (cpufreq_driver->set_boost && policy->boost_supported &&
1632
(new_policy || !cpufreq_boost_enabled())) {
1633
ret = policy_set_boost(policy, cpufreq_boost_enabled());
1634
if (ret) {
1635
/* If the set_boost fails, the online operation is not affected */
1636
pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
1637
str_enable_disable(cpufreq_boost_enabled()));
1638
}
1639
}
1640
1641
pr_debug("initialization complete\n");
1642
1643
return 0;
1644
}
1645
1646
/**
1647
* cpufreq_add_dev - the cpufreq interface for a CPU device.
1648
* @dev: CPU device.
1649
* @sif: Subsystem interface structure pointer (not used)
1650
*/
1651
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1652
{
1653
struct cpufreq_policy *policy;
1654
unsigned cpu = dev->id;
1655
int ret;
1656
1657
dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1658
1659
if (cpu_online(cpu)) {
1660
ret = cpufreq_online(cpu);
1661
if (ret)
1662
return ret;
1663
}
1664
1665
/* Create sysfs link on CPU registration */
1666
policy = per_cpu(cpufreq_cpu_data, cpu);
1667
if (policy)
1668
add_cpu_dev_symlink(policy, cpu, dev);
1669
1670
return 0;
1671
}
1672
1673
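/*
 * Take @cpu out of @policy. If other CPUs remain in the policy, just restart
 * the governor; otherwise remember the current governor/policy setting and do
 * a light-weight ->offline() (or a full ->exit() if ->offline() is missing).
 */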
static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
1674
{
1675
int ret;
1676
1677
if (has_target())
1678
cpufreq_stop_governor(policy);
1679
1680
cpumask_clear_cpu(cpu, policy->cpus);
1681
1682
if (!policy_is_inactive(policy)) {
1683
/* Nominate a new CPU if necessary. */
1684
if (cpu == policy->cpu)
1685
policy->cpu = cpumask_any(policy->cpus);
1686
1687
/* Start the governor again for the active policy. */
1688
if (has_target()) {
1689
ret = cpufreq_start_governor(policy);
1690
if (ret)
1691
pr_err("%s: Failed to start governor\n", __func__);
1692
}
1693
1694
return;
1695
}
1696
1697
if (has_target()) {
1698
strscpy(policy->last_governor, policy->governor->name,
1699
CPUFREQ_NAME_LEN);
1700
cpufreq_exit_governor(policy);
1701
} else {
1702
policy->last_policy = policy->policy;
1703
}
1704
1705
/*
1706
* Perform the ->offline() during light-weight tear-down, as
1707
* that allows fast recovery when the CPU comes back.
1708
*/
1709
if (cpufreq_driver->offline) {
1710
cpufreq_driver->offline(policy);
1711
return;
1712
}
1713
1714
if (cpufreq_driver->exit)
1715
cpufreq_driver->exit(policy);
1716
1717
policy->freq_table = NULL;
1718
}
1719
1720
static int cpufreq_offline(unsigned int cpu)
1721
{
1722
struct cpufreq_policy *policy;
1723
1724
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1725
1726
policy = cpufreq_cpu_get_raw(cpu);
1727
if (!policy) {
1728
pr_debug("%s: No cpu_data found\n", __func__);
1729
return 0;
1730
}
1731
1732
guard(cpufreq_policy_write)(policy);
1733
1734
__cpufreq_offline(cpu, policy);
1735
1736
return 0;
1737
}
1738
1739
/*
1740
* cpufreq_remove_dev - remove a CPU device
1741
*
1742
* Removes the cpufreq interface for a CPU device.
1743
*/
1744
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1745
{
1746
unsigned int cpu = dev->id;
1747
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1748
1749
if (!policy)
1750
return;
1751
1752
scoped_guard(cpufreq_policy_write, policy) {
1753
if (cpu_online(cpu))
1754
__cpufreq_offline(cpu, policy);
1755
1756
remove_cpu_dev_symlink(policy, cpu, dev);
1757
1758
if (!cpumask_empty(policy->real_cpus))
1759
return;
1760
1761
/*
1762
* Unregister cpufreq cooling once all the CPUs of the policy
1763
* are removed.
1764
*/
1765
if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
1766
cpufreq_cooling_unregister(policy->cdev);
1767
policy->cdev = NULL;
1768
}
1769
1770
/* We did light-weight exit earlier, do full tear down now */
1771
if (cpufreq_driver->offline && cpufreq_driver->exit)
1772
cpufreq_driver->exit(policy);
1773
}
1774
1775
cpufreq_policy_free(policy);
1776
}
1777
1778
/**
1779
* cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
1780
* @policy: Policy managing CPUs.
1781
* @new_freq: New CPU frequency.
1782
*
1783
* Adjust to the current frequency first and clean up later by either calling
1784
* cpufreq_update_policy(), or scheduling handle_update().
1785
*/
1786
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1787
unsigned int new_freq)
1788
{
1789
struct cpufreq_freqs freqs;
1790
1791
pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1792
policy->cur, new_freq);
1793
1794
freqs.old = policy->cur;
1795
freqs.new = new_freq;
1796
1797
cpufreq_freq_transition_begin(policy, &freqs);
1798
cpufreq_freq_transition_end(policy, &freqs, 0);
1799
}
1800
1801
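/*
 * Read the current frequency from the driver and, if it differs from
 * policy->cur by at least 1 MHz, resynchronize the policy (optionally
 * scheduling a policy update).
 */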
static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
1802
{
1803
unsigned int new_freq;
1804
1805
if (!cpufreq_driver->get)
1806
return 0;
1807
1808
new_freq = cpufreq_driver->get(policy->cpu);
1809
if (!new_freq)
1810
return 0;
1811
1812
/*
1813
* If fast frequency switching is used with the given policy, the check
1814
* against policy->cur is pointless, so skip it in that case.
1815
*/
1816
if (policy->fast_switch_enabled || !has_target())
1817
return new_freq;
1818
1819
if (policy->cur != new_freq) {
1820
/*
1821
* For some platforms, the frequency returned by hardware may be
1822
* slightly different from what is provided in the frequency
1823
* table, for example hardware may return 499 MHz instead of 500
1824
* MHz. In such cases it is better to avoid getting into
1825
* unnecessary frequency updates.
1826
*/
1827
if (abs(policy->cur - new_freq) < KHZ_PER_MHZ)
1828
return policy->cur;
1829
1830
cpufreq_out_of_sync(policy, new_freq);
1831
if (update)
1832
schedule_work(&policy->update);
1833
}
1834
1835
return new_freq;
1836
}
1837
1838
/**
1839
* cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1840
* @cpu: CPU number
1841
*
1842
* This is the last known freq, without actually getting it from the driver.
1843
* Return value will be same as what is shown in scaling_cur_freq in sysfs.
1844
*/
1845
unsigned int cpufreq_quick_get(unsigned int cpu)
1846
{
1847
struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
1848
unsigned long flags;
1849
1850
read_lock_irqsave(&cpufreq_driver_lock, flags);
1851
1852
if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
1853
unsigned int ret_freq = cpufreq_driver->get(cpu);
1854
1855
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1856
1857
return ret_freq;
1858
}
1859
1860
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1861
1862
policy = cpufreq_cpu_get(cpu);
1863
if (policy)
1864
return policy->cur;
1865
1866
return 0;
1867
}
1868
EXPORT_SYMBOL(cpufreq_quick_get);
1869
1870
/**
1871
* cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1872
* @cpu: CPU number
1873
*
1874
* Just return the max possible frequency for a given CPU.
1875
*/
1876
unsigned int cpufreq_quick_get_max(unsigned int cpu)
1877
{
1878
struct cpufreq_policy *policy __free(put_cpufreq_policy);
1879
1880
policy = cpufreq_cpu_get(cpu);
1881
if (policy)
1882
return policy->max;
1883
1884
return 0;
1885
}
1886
EXPORT_SYMBOL(cpufreq_quick_get_max);
1887
1888
/**
1889
* cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
1890
* @cpu: CPU number
1891
*
1892
* The default return value is the max_freq field of cpuinfo.
1893
*/
1894
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
1895
{
1896
struct cpufreq_policy *policy __free(put_cpufreq_policy);
1897
1898
policy = cpufreq_cpu_get(cpu);
1899
if (policy)
1900
return policy->cpuinfo.max_freq;
1901
1902
return 0;
1903
}
1904
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
1905
1906
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1907
{
1908
if (unlikely(policy_is_inactive(policy)))
1909
return 0;
1910
1911
return cpufreq_verify_current_freq(policy, true);
1912
}
1913
1914
/**
1915
* cpufreq_get - get the current CPU frequency (in kHz)
1916
* @cpu: CPU number
1917
*
1918
 * Get the current (static) frequency of the CPU.
1919
*/
1920
unsigned int cpufreq_get(unsigned int cpu)
1921
{
1922
struct cpufreq_policy *policy __free(put_cpufreq_policy);
1923
1924
policy = cpufreq_cpu_get(cpu);
1925
if (!policy)
1926
return 0;
1927
1928
guard(cpufreq_policy_read)(policy);
1929
1930
return __cpufreq_get(policy);
1931
}
1932
EXPORT_SYMBOL(cpufreq_get);
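/*
 * Illustrative sketch only, not part of this file: how an unrelated kernel
 * module might sample the current frequency of a CPU (the helper name below
 * is an assumption).  cpufreq_get() takes the policy read guard and may
 * therefore sleep, so it must not be used from atomic context;
 * cpufreq_quick_get() above typically returns the cached value instead.
 */
static void hypothetical_log_cpu_freq(unsigned int cpu)
{
        unsigned int khz = cpufreq_get(cpu);    /* 0 if no policy or driver */

        if (khz)
                pr_info("CPU%u is running at %u kHz\n", cpu, khz);
}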
1933
1934
static struct subsys_interface cpufreq_interface = {
1935
.name = "cpufreq",
1936
.subsys = &cpu_subsys,
1937
.add_dev = cpufreq_add_dev,
1938
.remove_dev = cpufreq_remove_dev,
1939
};
1940
1941
/*
1942
* In case the platform wants some specific frequency to be configured
1943
* during suspend.
1944
*/
1945
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1946
{
1947
int ret;
1948
1949
if (!policy->suspend_freq) {
1950
pr_debug("%s: suspend_freq not defined\n", __func__);
1951
return 0;
1952
}
1953
1954
pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1955
policy->suspend_freq);
1956
1957
ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1958
CPUFREQ_RELATION_H);
1959
if (ret)
1960
pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1961
__func__, policy->suspend_freq, ret);
1962
1963
return ret;
1964
}
1965
EXPORT_SYMBOL(cpufreq_generic_suspend);
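/*
 * Illustrative sketch only, not part of this file: a hypothetical driver
 * opts into cpufreq_generic_suspend() by setting policy->suspend_freq (in
 * kHz) from its ->init() callback and pointing its ->suspend() callback at
 * the helper above; "hypothetical_cpufreq_init" is an assumed name.
 */
static int hypothetical_cpufreq_init(struct cpufreq_policy *policy)
{
        /* frequency table setup omitted */
        policy->suspend_freq = 800000;  /* park at 800 MHz across suspend */
        return 0;
}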
1966
1967
/**
1968
* cpufreq_suspend() - Suspend CPUFreq governors.
1969
*
1970
* Called during system-wide suspend/hibernate cycles to suspend governors,
1971
* as some platforms can't change the frequency after this point in the suspend
1972
* cycle: some of the devices they use for changing the frequency (e.g. i2c,
1973
* regulators) are suspended soon afterwards.
1974
*/
1975
void cpufreq_suspend(void)
1976
{
1977
struct cpufreq_policy *policy;
1978
1979
if (!cpufreq_driver)
1980
return;
1981
1982
if (!has_target() && !cpufreq_driver->suspend)
1983
goto suspend;
1984
1985
pr_debug("%s: Suspending Governors\n", __func__);
1986
1987
for_each_active_policy(policy) {
1988
if (has_target()) {
1989
scoped_guard(cpufreq_policy_write, policy) {
1990
cpufreq_stop_governor(policy);
1991
}
1992
}
1993
1994
if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1995
pr_err("%s: Failed to suspend driver: %s\n", __func__,
1996
cpufreq_driver->name);
1997
}
1998
1999
suspend:
2000
cpufreq_suspended = true;
2001
}
2002
2003
/**
2004
* cpufreq_resume() - Resume CPUFreq governors.
2005
*
2006
* Called during system-wide suspend/hibernate cycles to resume governors that
2007
* were suspended by cpufreq_suspend().
2008
*/
2009
void cpufreq_resume(void)
2010
{
2011
struct cpufreq_policy *policy;
2012
int ret;
2013
2014
if (!cpufreq_driver)
2015
return;
2016
2017
if (unlikely(!cpufreq_suspended))
2018
return;
2019
2020
cpufreq_suspended = false;
2021
2022
if (!has_target() && !cpufreq_driver->resume)
2023
return;
2024
2025
pr_debug("%s: Resuming Governors\n", __func__);
2026
2027
for_each_active_policy(policy) {
2028
if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
2029
pr_err("%s: Failed to resume driver: %s\n", __func__,
2030
cpufreq_driver->name);
2031
} else if (has_target()) {
2032
scoped_guard(cpufreq_policy_write, policy) {
2033
ret = cpufreq_start_governor(policy);
2034
}
2035
2036
if (ret)
2037
pr_err("%s: Failed to start governor for CPU%u's policy\n",
2038
__func__, policy->cpu);
2039
}
2040
}
2041
}
2042
2043
/**
2044
* cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
2045
* @flags: Flags to test against the current cpufreq driver's flags.
2046
*
2047
* Assumes that the driver is there, so callers must ensure that this is the
2048
* case.
2049
*/
2050
bool cpufreq_driver_test_flags(u16 flags)
2051
{
2052
return !!(cpufreq_driver->flags & flags);
2053
}
2054
2055
/**
2056
* cpufreq_get_current_driver - Return the current driver's name.
2057
*
2058
* Return the name string of the currently registered cpufreq driver or NULL if
2059
* none.
2060
*/
2061
const char *cpufreq_get_current_driver(void)
2062
{
2063
if (cpufreq_driver)
2064
return cpufreq_driver->name;
2065
2066
return NULL;
2067
}
2068
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
2069
2070
/**
2071
* cpufreq_get_driver_data - Return current driver data.
2072
*
2073
* Return the private data of the currently registered cpufreq driver, or NULL
2074
* if no cpufreq driver has been registered.
2075
*/
2076
void *cpufreq_get_driver_data(void)
2077
{
2078
if (cpufreq_driver)
2079
return cpufreq_driver->driver_data;
2080
2081
return NULL;
2082
}
2083
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
2084
2085
/*********************************************************************
2086
* NOTIFIER LISTS INTERFACE *
2087
*********************************************************************/
2088
2089
/**
2090
* cpufreq_register_notifier - Register a notifier with cpufreq.
2091
* @nb: notifier function to register.
2092
* @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2093
*
2094
* Add a notifier to one of two lists: either a list of notifiers that run on
2095
* clock rate changes (once before and once after every transition), or a list
2096
* of notifiers that run on cpufreq policy changes.
2097
*
2098
* This function may sleep and it has the same return values as
2099
* blocking_notifier_chain_register().
2100
*/
2101
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
2102
{
2103
int ret;
2104
2105
if (cpufreq_disabled())
2106
return -EINVAL;
2107
2108
switch (list) {
2109
case CPUFREQ_TRANSITION_NOTIFIER:
2110
mutex_lock(&cpufreq_fast_switch_lock);
2111
2112
if (cpufreq_fast_switch_count > 0) {
2113
mutex_unlock(&cpufreq_fast_switch_lock);
2114
return -EBUSY;
2115
}
2116
ret = srcu_notifier_chain_register(
2117
&cpufreq_transition_notifier_list, nb);
2118
if (!ret)
2119
cpufreq_fast_switch_count--;
2120
2121
mutex_unlock(&cpufreq_fast_switch_lock);
2122
break;
2123
case CPUFREQ_POLICY_NOTIFIER:
2124
ret = blocking_notifier_chain_register(
2125
&cpufreq_policy_notifier_list, nb);
2126
break;
2127
default:
2128
ret = -EINVAL;
2129
}
2130
2131
return ret;
2132
}
2133
EXPORT_SYMBOL(cpufreq_register_notifier);
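/*
 * Illustrative sketch only, not part of this file: a hypothetical module
 * registering a transition notifier (all "hypothetical_*" names are
 * assumptions).  The callback runs once with CPUFREQ_PRECHANGE and once with
 * CPUFREQ_POSTCHANGE around every transition, with a struct cpufreq_freqs as
 * payload; registration fails with -EBUSY while any policy uses fast
 * frequency switching, as implemented above.
 */
static int hypothetical_transition_cb(struct notifier_block *nb,
                                      unsigned long event, void *data)
{
        struct cpufreq_freqs *freqs = data;

        if (event == CPUFREQ_POSTCHANGE)
                pr_debug("policy of CPU%u: %u kHz -> %u kHz\n",
                         freqs->policy->cpu, freqs->old, freqs->new);

        return NOTIFY_OK;
}

static struct notifier_block hypothetical_transition_nb = {
        .notifier_call = hypothetical_transition_cb,
};

static int __init hypothetical_notifier_init(void)
{
        return cpufreq_register_notifier(&hypothetical_transition_nb,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}

static void __exit hypothetical_notifier_exit(void)
{
        cpufreq_unregister_notifier(&hypothetical_transition_nb,
                                    CPUFREQ_TRANSITION_NOTIFIER);
}
module_init(hypothetical_notifier_init);
module_exit(hypothetical_notifier_exit);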
2134
2135
/**
2136
* cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
2137
* @nb: notifier block to be unregistered.
2138
* @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2139
*
2140
* Remove a notifier from one of the cpufreq notifier lists.
2141
*
2142
* This function may sleep and it has the same return values as
2143
* blocking_notifier_chain_unregister().
2144
*/
2145
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
2146
{
2147
int ret;
2148
2149
if (cpufreq_disabled())
2150
return -EINVAL;
2151
2152
switch (list) {
2153
case CPUFREQ_TRANSITION_NOTIFIER:
2154
mutex_lock(&cpufreq_fast_switch_lock);
2155
2156
ret = srcu_notifier_chain_unregister(
2157
&cpufreq_transition_notifier_list, nb);
2158
if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
2159
cpufreq_fast_switch_count++;
2160
2161
mutex_unlock(&cpufreq_fast_switch_lock);
2162
break;
2163
case CPUFREQ_POLICY_NOTIFIER:
2164
ret = blocking_notifier_chain_unregister(
2165
&cpufreq_policy_notifier_list, nb);
2166
break;
2167
default:
2168
ret = -EINVAL;
2169
}
2170
2171
return ret;
2172
}
2173
EXPORT_SYMBOL(cpufreq_unregister_notifier);
2174
2175
2176
/*********************************************************************
2177
* GOVERNORS *
2178
*********************************************************************/
2179
2180
/**
2181
* cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
2182
* @policy: cpufreq policy to switch the frequency for.
2183
* @target_freq: New frequency to set (may be approximate).
2184
*
2185
* Carry out a fast frequency switch without sleeping.
2186
*
2187
* The driver's ->fast_switch() callback invoked by this function must be
2188
* suitable for being called from within RCU-sched read-side critical sections
2189
* and it is expected to select the minimum available frequency greater than or
2190
* equal to @target_freq (CPUFREQ_RELATION_L).
2191
*
2192
* This function must not be called if policy->fast_switch_enabled is unset.
2193
*
2194
* Governors calling this function must guarantee that it will never be invoked
2195
* twice in parallel for the same policy and that it will never be called in
2196
* parallel with either ->target() or ->target_index() for the same policy.
2197
*
2198
* Returns the actual frequency set for the CPU.
2199
*
2200
* If 0 is returned by the driver's ->fast_switch() callback to indicate an
2201
* error condition, the hardware configuration must be preserved.
2202
*/
2203
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
2204
unsigned int target_freq)
2205
{
2206
unsigned int freq;
2207
int cpu;
2208
2209
target_freq = clamp_val(target_freq, policy->min, policy->max);
2210
freq = cpufreq_driver->fast_switch(policy, target_freq);
2211
2212
if (!freq)
2213
return 0;
2214
2215
policy->cur = freq;
2216
arch_set_freq_scale(policy->related_cpus, freq,
2217
arch_scale_freq_ref(policy->cpu));
2218
cpufreq_stats_record_transition(policy, freq);
2219
2220
if (trace_cpu_frequency_enabled()) {
2221
for_each_cpu(cpu, policy->cpus)
2222
trace_cpu_frequency(freq, cpu);
2223
}
2224
2225
return freq;
2226
}
2227
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
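/*
 * Illustrative sketch only, not part of this file: a hypothetical driver's
 * ->fast_switch() callback ("hypothetical_write_perf_reg" is an assumed,
 * non-sleeping register write).  Per the contract above it picks the lowest
 * table frequency at or above the requested target, returns the frequency
 * actually set, and returns 0 on failure while leaving the hardware
 * configuration untouched.
 */
static unsigned int hypothetical_fast_switch(struct cpufreq_policy *policy,
                                             unsigned int target_freq)
{
        int index = cpufreq_table_find_index_l(policy, target_freq, false);

        if (hypothetical_write_perf_reg(policy->cpu,
                                        policy->freq_table[index].driver_data))
                return 0;

        return policy->freq_table[index].frequency;
}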
2228
2229
/**
2230
* cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
2231
* @cpu: Target CPU.
2232
* @min_perf: Minimum (required) performance level (units of @capacity).
2233
* @target_perf: Target (desired) performance level (units of @capacity).
2234
* @capacity: Capacity of the target CPU.
2235
*
2236
* Carry out a fast performance level switch of @cpu without sleeping.
2237
*
2238
* The driver's ->adjust_perf() callback invoked by this function must be
2239
* suitable for being called from within RCU-sched read-side critical sections
2240
* and it is expected to select a suitable performance level equal to or above
2241
* @min_perf and preferably equal to or below @target_perf.
2242
*
2243
* This function must not be called if policy->fast_switch_enabled is unset.
2244
*
2245
* Governors calling this function must guarantee that it will never be invoked
2246
* twice in parallel for the same CPU and that it will never be called in
2247
* parallel with either ->target() or ->target_index() or ->fast_switch() for
2248
* the same CPU.
2249
*/
2250
void cpufreq_driver_adjust_perf(unsigned int cpu,
2251
unsigned long min_perf,
2252
unsigned long target_perf,
2253
unsigned long capacity)
2254
{
2255
cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
2256
}
2257
2258
/**
2259
* cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
2260
*
2261
* Return 'true' if the ->adjust_perf callback is present for the
2262
* current driver or 'false' otherwise.
2263
*/
2264
bool cpufreq_driver_has_adjust_perf(void)
2265
{
2266
return !!cpufreq_driver->adjust_perf;
2267
}
2268
2269
/* Must set freqs->new to intermediate frequency */
2270
static int __target_intermediate(struct cpufreq_policy *policy,
2271
struct cpufreq_freqs *freqs, int index)
2272
{
2273
int ret;
2274
2275
freqs->new = cpufreq_driver->get_intermediate(policy, index);
2276
2277
/* We don't need to switch to intermediate freq */
2278
if (!freqs->new)
2279
return 0;
2280
2281
pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
2282
__func__, policy->cpu, freqs->old, freqs->new);
2283
2284
cpufreq_freq_transition_begin(policy, freqs);
2285
ret = cpufreq_driver->target_intermediate(policy, index);
2286
cpufreq_freq_transition_end(policy, freqs, ret);
2287
2288
if (ret)
2289
pr_err("%s: Failed to change to intermediate frequency: %d\n",
2290
__func__, ret);
2291
2292
return ret;
2293
}
2294
2295
static int __target_index(struct cpufreq_policy *policy, int index)
2296
{
2297
struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
2298
unsigned int restore_freq, intermediate_freq = 0;
2299
unsigned int newfreq = policy->freq_table[index].frequency;
2300
int retval = -EINVAL;
2301
bool notify;
2302
2303
if (newfreq == policy->cur)
2304
return 0;
2305
2306
/* Save last value to restore later on errors */
2307
restore_freq = policy->cur;
2308
2309
notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
2310
if (notify) {
2311
/* Handle switching to intermediate frequency */
2312
if (cpufreq_driver->get_intermediate) {
2313
retval = __target_intermediate(policy, &freqs, index);
2314
if (retval)
2315
return retval;
2316
2317
intermediate_freq = freqs.new;
2318
/* Set old freq to intermediate */
2319
if (intermediate_freq)
2320
freqs.old = freqs.new;
2321
}
2322
2323
freqs.new = newfreq;
2324
pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
2325
__func__, policy->cpu, freqs.old, freqs.new);
2326
2327
cpufreq_freq_transition_begin(policy, &freqs);
2328
}
2329
2330
retval = cpufreq_driver->target_index(policy, index);
2331
if (retval)
2332
pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
2333
retval);
2334
2335
if (notify) {
2336
cpufreq_freq_transition_end(policy, &freqs, retval);
2337
2338
/*
2339
* Failed after setting to intermediate freq? Driver should have
2340
* reverted back to initial frequency and so should we. Check
2341
* here for intermediate_freq instead of get_intermediate, in
2342
* case we haven't switched to intermediate freq at all.
2343
*/
2344
if (unlikely(retval && intermediate_freq)) {
2345
freqs.old = intermediate_freq;
2346
freqs.new = restore_freq;
2347
cpufreq_freq_transition_begin(policy, &freqs);
2348
cpufreq_freq_transition_end(policy, &freqs, 0);
2349
}
2350
}
2351
2352
return retval;
2353
}
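/*
 * Illustrative sketch only, not part of this file: the callback pair driven
 * by __target_intermediate()/__target_index() above, for a hypothetical
 * platform that must reparent the CPU clock to a stable PLL before
 * programming the final rate ("HYPOTHETICAL_SAFE_PLL_KHZ" and
 * "hypothetical_switch_to_safe_pll" are assumptions).
 */
static unsigned int hypothetical_get_intermediate(struct cpufreq_policy *policy,
                                                  unsigned int index)
{
        /* Returning 0 here would mean "no intermediate step needed". */
        return HYPOTHETICAL_SAFE_PLL_KHZ;
}

static int hypothetical_target_intermediate(struct cpufreq_policy *policy,
                                            unsigned int index)
{
        return hypothetical_switch_to_safe_pll(policy->cpu);
}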
2354
2355
int __cpufreq_driver_target(struct cpufreq_policy *policy,
2356
unsigned int target_freq,
2357
unsigned int relation)
2358
{
2359
unsigned int old_target_freq = target_freq;
2360
2361
if (cpufreq_disabled())
2362
return -ENODEV;
2363
2364
target_freq = __resolve_freq(policy, target_freq, policy->min,
2365
policy->max, relation);
2366
2367
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
2368
policy->cpu, target_freq, relation, old_target_freq);
2369
2370
/*
2371
* This might look like a redundant call, as we are checking it again
2372
* after finding the index, but it is left in intentionally for cases where
2373
* exactly the same frequency is requested again, so that we can save a few
2374
* function calls.
2375
*/
2376
if (target_freq == policy->cur &&
2377
!(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
2378
return 0;
2379
2380
if (cpufreq_driver->target) {
2381
/*
2382
* If the driver hasn't setup a single inefficient frequency,
2383
* it's unlikely it knows how to decode CPUFREQ_RELATION_E.
2384
*/
2385
if (!policy->efficiencies_available)
2386
relation &= ~CPUFREQ_RELATION_E;
2387
2388
return cpufreq_driver->target(policy, target_freq, relation);
2389
}
2390
2391
if (!cpufreq_driver->target_index)
2392
return -EINVAL;
2393
2394
return __target_index(policy, policy->cached_resolved_idx);
2395
}
2396
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2397
2398
int cpufreq_driver_target(struct cpufreq_policy *policy,
2399
unsigned int target_freq,
2400
unsigned int relation)
2401
{
2402
guard(cpufreq_policy_write)(policy);
2403
2404
return __cpufreq_driver_target(policy, target_freq, relation);
2405
}
2406
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
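/*
 * Illustrative sketch only, not part of this file: picking a relation for a
 * target request ("hypothetical_force_min" is an assumed name).
 * CPUFREQ_RELATION_L selects the lowest table frequency at or above the
 * target and CPUFREQ_RELATION_H the highest at or below it, while the
 * CPUFREQ_RELATION_E modifier prefers efficient frequencies when the driver
 * exposes them (see the check above).  cpufreq_driver_target() takes the
 * policy write guard itself, so it is for callers that do not already hold
 * the policy rwsem.
 */
static int hypothetical_force_min(struct cpufreq_policy *policy)
{
        return cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
}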
2407
2408
__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
2409
{
2410
return NULL;
2411
}
2412
2413
static int cpufreq_init_governor(struct cpufreq_policy *policy)
2414
{
2415
int ret;
2416
2417
/* Don't start any governor operations if we are entering suspend */
2418
if (cpufreq_suspended)
2419
return 0;
2420
/*
2421
* The governor might not be initialized here if an ACPI _PPC change
2422
* notification happened, so check for it.
2423
*/
2424
if (!policy->governor)
2425
return -EINVAL;
2426
2427
/* Platform doesn't want dynamic frequency switching? */
2428
if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
2429
cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
2430
struct cpufreq_governor *gov = cpufreq_fallback_governor();
2431
2432
if (gov) {
2433
pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
2434
policy->governor->name, gov->name);
2435
policy->governor = gov;
2436
} else {
2437
return -EINVAL;
2438
}
2439
}
2440
2441
if (!try_module_get(policy->governor->owner))
2442
return -EINVAL;
2443
2444
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2445
2446
if (policy->governor->init) {
2447
ret = policy->governor->init(policy);
2448
if (ret) {
2449
module_put(policy->governor->owner);
2450
return ret;
2451
}
2452
}
2453
2454
policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
2455
2456
return 0;
2457
}
2458
2459
static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2460
{
2461
if (cpufreq_suspended || !policy->governor)
2462
return;
2463
2464
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2465
2466
if (policy->governor->exit)
2467
policy->governor->exit(policy);
2468
2469
module_put(policy->governor->owner);
2470
}
2471
2472
int cpufreq_start_governor(struct cpufreq_policy *policy)
2473
{
2474
int ret;
2475
2476
if (cpufreq_suspended)
2477
return 0;
2478
2479
if (!policy->governor)
2480
return -EINVAL;
2481
2482
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2483
2484
cpufreq_verify_current_freq(policy, false);
2485
2486
if (policy->governor->start) {
2487
ret = policy->governor->start(policy);
2488
if (ret)
2489
return ret;
2490
}
2491
2492
if (policy->governor->limits)
2493
policy->governor->limits(policy);
2494
2495
return 0;
2496
}
2497
2498
void cpufreq_stop_governor(struct cpufreq_policy *policy)
2499
{
2500
if (cpufreq_suspended || !policy->governor)
2501
return;
2502
2503
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2504
2505
if (policy->governor->stop)
2506
policy->governor->stop(policy);
2507
}
2508
2509
static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2510
{
2511
if (cpufreq_suspended || !policy->governor)
2512
return;
2513
2514
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2515
2516
if (policy->governor->limits)
2517
policy->governor->limits(policy);
2518
}
2519
2520
int cpufreq_register_governor(struct cpufreq_governor *governor)
2521
{
2522
int err;
2523
2524
if (!governor)
2525
return -EINVAL;
2526
2527
if (cpufreq_disabled())
2528
return -ENODEV;
2529
2530
mutex_lock(&cpufreq_governor_mutex);
2531
2532
err = -EBUSY;
2533
if (!find_governor(governor->name)) {
2534
err = 0;
2535
list_add(&governor->governor_list, &cpufreq_governor_list);
2536
}
2537
2538
mutex_unlock(&cpufreq_governor_mutex);
2539
return err;
2540
}
2541
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
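/*
 * Illustrative sketch only, not part of this file: the minimum a hypothetical
 * governor module needs in order to become selectable through
 * scaling_governor (all "hypothetical_*" names are assumptions).
 * ->start()/->stop() bracket the period in which the governor may issue
 * frequency requests, and ->limits() is invoked when policy->min/max change.
 */
static int hypothetical_gov_start(struct cpufreq_policy *policy)
{
        /* A real governor would set up its timers or irq_work here. */
        return 0;
}

static void hypothetical_gov_stop(struct cpufreq_policy *policy)
{
}

static void hypothetical_gov_limits(struct cpufreq_policy *policy)
{
        /* Re-clamp to the new limits; aim for the maximum as an example. */
        __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}

static struct cpufreq_governor hypothetical_gov = {
        .name   = "hypothetical",
        .owner  = THIS_MODULE,
        .start  = hypothetical_gov_start,
        .stop   = hypothetical_gov_stop,
        .limits = hypothetical_gov_limits,
};

static int __init hypothetical_gov_init(void)
{
        return cpufreq_register_governor(&hypothetical_gov);
}

static void __exit hypothetical_gov_exit(void)
{
        cpufreq_unregister_governor(&hypothetical_gov);
}
module_init(hypothetical_gov_init);
module_exit(hypothetical_gov_exit);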
2542
2543
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2544
{
2545
struct cpufreq_policy *policy;
2546
unsigned long flags;
2547
2548
if (!governor)
2549
return;
2550
2551
if (cpufreq_disabled())
2552
return;
2553
2554
/* clear last_governor for all inactive policies */
2555
read_lock_irqsave(&cpufreq_driver_lock, flags);
2556
for_each_inactive_policy(policy) {
2557
if (!strcmp(policy->last_governor, governor->name)) {
2558
policy->governor = NULL;
2559
strcpy(policy->last_governor, "\0");
2560
}
2561
}
2562
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2563
2564
mutex_lock(&cpufreq_governor_mutex);
2565
list_del(&governor->governor_list);
2566
mutex_unlock(&cpufreq_governor_mutex);
2567
}
2568
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2569
2570
2571
/*********************************************************************
2572
* POLICY INTERFACE *
2573
*********************************************************************/
2574
2575
DEFINE_PER_CPU(unsigned long, cpufreq_pressure);
2576
2577
/**
2578
* cpufreq_update_pressure() - Update cpufreq pressure for CPUs
2579
* @policy: cpufreq policy of the CPUs.
2580
*
2581
* Update the value of cpufreq pressure for all CPUs in @policy.
2582
*/
2583
static void cpufreq_update_pressure(struct cpufreq_policy *policy)
2584
{
2585
unsigned long max_capacity, capped_freq, pressure;
2586
u32 max_freq;
2587
int cpu;
2588
2589
cpu = cpumask_first(policy->related_cpus);
2590
max_freq = arch_scale_freq_ref(cpu);
2591
capped_freq = policy->max;
2592
2593
/*
2594
* Properly handle boost frequencies, which should simply clear
2595
* the cpufreq pressure value.
2596
*/
2597
if (max_freq <= capped_freq) {
2598
pressure = 0;
2599
} else {
2600
max_capacity = arch_scale_cpu_capacity(cpu);
2601
pressure = max_capacity -
2602
mult_frac(max_capacity, capped_freq, max_freq);
2603
}
2604
2605
for_each_cpu(cpu, policy->related_cpus)
2606
WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure);
2607
}
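/*
 * Worked example for the computation above (numbers are illustrative only):
 * with arch_scale_cpu_capacity() = 1024, arch_scale_freq_ref() = 2000000 kHz
 * and policy->max capped to 1500000 kHz,
 *
 *      pressure = 1024 - mult_frac(1024, 1500000, 2000000)
 *               = 1024 - 768 = 256,
 *
 * i.e. a quarter of the CPU's capacity is reported as unavailable.  An
 * uncapped (or boost) maximum gives a pressure of 0.
 */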
2608
2609
/**
2610
* cpufreq_set_policy - Modify cpufreq policy parameters.
2611
* @policy: Policy object to modify.
2612
* @new_gov: Policy governor pointer.
2613
* @new_pol: Policy value (for drivers with built-in governors).
2614
*
2615
* Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
2616
* limits to be set for the policy, update @policy with the verified limits
2617
* values and either invoke the driver's ->setpolicy() callback (if present) or
2618
* carry out a governor update for @policy. That is, run the current governor's
2619
* ->limits() callback (if @new_gov points to the same object as the one in
2620
* @policy) or replace the governor for @policy with @new_gov.
2621
*
2622
* The cpuinfo part of @policy is not updated by this function.
2623
*/
2624
static int cpufreq_set_policy(struct cpufreq_policy *policy,
2625
struct cpufreq_governor *new_gov,
2626
unsigned int new_pol)
2627
{
2628
struct cpufreq_policy_data new_data;
2629
struct cpufreq_governor *old_gov;
2630
int ret;
2631
2632
memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2633
new_data.freq_table = policy->freq_table;
2634
new_data.cpu = policy->cpu;
2635
/*
2636
* The PM QoS framework collects all the requests from users and provides us
2637
* with the final aggregated value here.
2638
*/
2639
new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
2640
new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
2641
2642
pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2643
new_data.cpu, new_data.min, new_data.max);
2644
2645
/*
2646
* Verify that the CPU speed can be set within these limits and make sure
2647
* that min <= max.
2648
*/
2649
ret = cpufreq_driver->verify(&new_data);
2650
if (ret)
2651
return ret;
2652
2653
/*
2654
* Resolve policy min/max to available frequencies. This ensures that
2655
* frequency resolution will neither overshoot the requested maximum
2656
* nor undershoot the requested minimum.
2657
*
2658
* Avoid storing intermediate values in policy->max or policy->min and
2659
* compiler optimizations around them because they may be accessed
2660
* concurrently by cpufreq_driver_resolve_freq() during the update.
2661
*/
2662
WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
2663
new_data.min, new_data.max,
2664
CPUFREQ_RELATION_H));
2665
new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
2666
new_data.max, CPUFREQ_RELATION_L);
2667
WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
2668
2669
trace_cpu_frequency_limits(policy);
2670
2671
cpufreq_update_pressure(policy);
2672
2673
policy->cached_target_freq = UINT_MAX;
2674
2675
pr_debug("new min and max freqs are %u - %u kHz\n",
2676
policy->min, policy->max);
2677
2678
if (cpufreq_driver->setpolicy) {
2679
policy->policy = new_pol;
2680
pr_debug("setting range\n");
2681
return cpufreq_driver->setpolicy(policy);
2682
}
2683
2684
if (new_gov == policy->governor) {
2685
pr_debug("governor limits update\n");
2686
cpufreq_governor_limits(policy);
2687
return 0;
2688
}
2689
2690
pr_debug("governor switch\n");
2691
2692
/* save old, working values */
2693
old_gov = policy->governor;
2694
/* end old governor */
2695
if (old_gov) {
2696
cpufreq_stop_governor(policy);
2697
cpufreq_exit_governor(policy);
2698
}
2699
2700
/* start new governor */
2701
policy->governor = new_gov;
2702
ret = cpufreq_init_governor(policy);
2703
if (!ret) {
2704
ret = cpufreq_start_governor(policy);
2705
if (!ret) {
2706
pr_debug("governor change\n");
2707
return 0;
2708
}
2709
cpufreq_exit_governor(policy);
2710
}
2711
2712
/* new governor failed, so re-start old one */
2713
pr_debug("starting governor %s failed\n", policy->governor->name);
2714
if (old_gov) {
2715
policy->governor = old_gov;
2716
if (cpufreq_init_governor(policy)) {
2717
policy->governor = NULL;
2718
} else if (cpufreq_start_governor(policy)) {
2719
cpufreq_exit_governor(policy);
2720
policy->governor = NULL;
2721
}
2722
}
2723
2724
return ret;
2725
}
2726
2727
static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
2728
{
2729
guard(cpufreq_policy_write)(policy);
2730
2731
/*
2732
* The BIOS might change the frequency behind our back, so ask the driver
2733
* for the current frequency and notify the governors about a change.
2734
*/
2735
if (cpufreq_driver->get && has_target() &&
2736
(cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
2737
return;
2738
2739
refresh_frequency_limits(policy);
2740
}
2741
2742
/**
2743
* cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2744
* @cpu: CPU to re-evaluate the policy for.
2745
*
2746
* Update the current frequency for the cpufreq policy of @cpu and use
2747
* cpufreq_set_policy() to re-apply the min and max limits, which triggers the
2748
* evaluation of policy notifiers and the cpufreq driver's ->verify() callback
2749
* for the policy in question, among other things.
2750
*/
2751
void cpufreq_update_policy(unsigned int cpu)
2752
{
2753
struct cpufreq_policy *policy __free(put_cpufreq_policy);
2754
2755
policy = cpufreq_cpu_get(cpu);
2756
if (!policy)
2757
return;
2758
2759
cpufreq_policy_refresh(policy);
2760
}
2761
EXPORT_SYMBOL(cpufreq_update_policy);
2762
2763
/**
2764
* cpufreq_update_limits - Update policy limits for a given CPU.
2765
* @cpu: CPU to update the policy limits for.
2766
*
2767
* Invoke the driver's ->update_limits callback if present or call
2768
* cpufreq_policy_refresh() for @cpu.
2769
*/
2770
void cpufreq_update_limits(unsigned int cpu)
2771
{
2772
struct cpufreq_policy *policy __free(put_cpufreq_policy);
2773
2774
policy = cpufreq_cpu_get(cpu);
2775
if (!policy)
2776
return;
2777
2778
if (cpufreq_driver->update_limits)
2779
cpufreq_driver->update_limits(policy);
2780
else
2781
cpufreq_policy_refresh(policy);
2782
}
2783
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
2784
2785
/*********************************************************************
2786
* BOOST *
2787
*********************************************************************/
2788
int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
2789
{
2790
int ret;
2791
2792
if (!policy->freq_table)
2793
return -ENXIO;
2794
2795
ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
2796
if (ret) {
2797
pr_err("%s: Policy frequency update failed\n", __func__);
2798
return ret;
2799
}
2800
2801
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
2802
if (ret < 0)
2803
return ret;
2804
2805
return 0;
2806
}
2807
EXPORT_SYMBOL_GPL(cpufreq_boost_set_sw);
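/*
 * Illustrative sketch only, not part of this file: one way a hypothetical
 * driver could wire software-managed boost, by marking turbo entries in its
 * frequency table with CPUFREQ_BOOST_FREQ and pointing ->set_boost at
 * cpufreq_boost_set_sw() above, which rebuilds the cpuinfo limits from the
 * table and updates the policy's maximum-frequency QoS request.  The table
 * and driver names are assumptions and the remaining callbacks are omitted.
 */
static struct cpufreq_frequency_table hypothetical_boost_freq_table[] = {
        { .frequency = 1000000 },
        { .frequency = 1500000 },
        { .frequency = 2000000, .flags = CPUFREQ_BOOST_FREQ },
        { .frequency = CPUFREQ_TABLE_END },
};

static struct cpufreq_driver hypothetical_boost_driver = {
        .name           = "hypo-boost",
        .set_boost      = cpufreq_boost_set_sw,
        /* ->init(), ->verify(), ->target_index() etc. omitted */
};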
2808
2809
static int cpufreq_boost_trigger_state(int state)
2810
{
2811
struct cpufreq_policy *policy;
2812
unsigned long flags;
2813
int ret = 0;
2814
2815
/*
2816
* Don't compare 'cpufreq_driver->boost_enabled' with 'state' here to
2817
* make sure all policies are in sync with global boost flag.
2818
*/
2819
2820
write_lock_irqsave(&cpufreq_driver_lock, flags);
2821
cpufreq_driver->boost_enabled = state;
2822
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2823
2824
cpus_read_lock();
2825
for_each_active_policy(policy) {
2826
if (!policy->boost_supported)
2827
continue;
2828
2829
ret = policy_set_boost(policy, state);
2830
if (ret)
2831
goto err_reset_state;
2832
}
2833
cpus_read_unlock();
2834
2835
return 0;
2836
2837
err_reset_state:
2838
cpus_read_unlock();
2839
2840
write_lock_irqsave(&cpufreq_driver_lock, flags);
2841
cpufreq_driver->boost_enabled = !state;
2842
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2843
2844
pr_err("%s: Cannot %s BOOST\n",
2845
__func__, str_enable_disable(state));
2846
2847
return ret;
2848
}
2849
2850
static bool cpufreq_boost_supported(void)
2851
{
2852
return cpufreq_driver->set_boost;
2853
}
2854
2855
static int create_boost_sysfs_file(void)
2856
{
2857
int ret;
2858
2859
ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2860
if (ret)
2861
pr_err("%s: cannot register global BOOST sysfs file\n",
2862
__func__);
2863
2864
return ret;
2865
}
2866
2867
static void remove_boost_sysfs_file(void)
2868
{
2869
if (cpufreq_boost_supported())
2870
sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2871
}
2872
2873
bool cpufreq_boost_enabled(void)
2874
{
2875
return cpufreq_driver->boost_enabled;
2876
}
2877
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2878
2879
/*********************************************************************
2880
* REGISTER / UNREGISTER CPUFREQ DRIVER *
2881
*********************************************************************/
2882
static enum cpuhp_state hp_online;
2883
2884
static int cpuhp_cpufreq_online(unsigned int cpu)
2885
{
2886
cpufreq_online(cpu);
2887
2888
return 0;
2889
}
2890
2891
static int cpuhp_cpufreq_offline(unsigned int cpu)
2892
{
2893
cpufreq_offline(cpu);
2894
2895
return 0;
2896
}
2897
2898
/**
2899
* cpufreq_register_driver - register a CPU Frequency driver
2900
* @driver_data: A struct cpufreq_driver containing the values
2901
* submitted by the CPU Frequency driver.
2902
*
2903
* Registers a CPU Frequency driver to this core code. This code
2904
* returns zero on success, -EEXIST when another driver got here first
2905
* (and isn't unregistered in the meantime).
2906
*
2907
*/
2908
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2909
{
2910
unsigned long flags;
2911
int ret;
2912
2913
if (cpufreq_disabled())
2914
return -ENODEV;
2915
2916
/*
2917
* The cpufreq core depends heavily on the availability of device
2918
* structures, so make sure they are available before proceeding further.
2919
*/
2920
if (!get_cpu_device(0))
2921
return -EPROBE_DEFER;
2922
2923
if (!driver_data || !driver_data->verify || !driver_data->init ||
2924
!(driver_data->setpolicy || driver_data->target_index ||
2925
driver_data->target) ||
2926
(driver_data->setpolicy && (driver_data->target_index ||
2927
driver_data->target)) ||
2928
(!driver_data->get_intermediate != !driver_data->target_intermediate) ||
2929
(!driver_data->online != !driver_data->offline) ||
2930
(driver_data->adjust_perf && !driver_data->fast_switch))
2931
return -EINVAL;
2932
2933
pr_debug("trying to register driver %s\n", driver_data->name);
2934
2935
/* Protect against concurrent CPU online/offline. */
2936
cpus_read_lock();
2937
2938
write_lock_irqsave(&cpufreq_driver_lock, flags);
2939
if (cpufreq_driver) {
2940
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2941
ret = -EEXIST;
2942
goto out;
2943
}
2944
cpufreq_driver = driver_data;
2945
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2946
2947
if (driver_data->setpolicy)
2948
driver_data->flags |= CPUFREQ_CONST_LOOPS;
2949
2950
if (cpufreq_boost_supported()) {
2951
ret = create_boost_sysfs_file();
2952
if (ret)
2953
goto err_null_driver;
2954
}
2955
2956
ret = subsys_interface_register(&cpufreq_interface);
2957
if (ret)
2958
goto err_boost_unreg;
2959
2960
if (unlikely(list_empty(&cpufreq_policy_list))) {
2961
/* if all ->init() calls failed, unregister */
2962
ret = -ENODEV;
2963
pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2964
driver_data->name);
2965
goto err_if_unreg;
2966
}
2967
2968
ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
2969
"cpufreq:online",
2970
cpuhp_cpufreq_online,
2971
cpuhp_cpufreq_offline);
2972
if (ret < 0)
2973
goto err_if_unreg;
2974
hp_online = ret;
2975
ret = 0;
2976
2977
/*
2978
* Mark support for the scheduler's frequency invariance engine for
2979
* drivers that implement target(), target_index() or fast_switch().
2980
*/
2981
if (!cpufreq_driver->setpolicy) {
2982
static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
2983
pr_debug("supports frequency invariance");
2984
}
2985
2986
pr_debug("driver %s up and running\n", driver_data->name);
2987
goto out;
2988
2989
err_if_unreg:
2990
subsys_interface_unregister(&cpufreq_interface);
2991
err_boost_unreg:
2992
remove_boost_sysfs_file();
2993
err_null_driver:
2994
write_lock_irqsave(&cpufreq_driver_lock, flags);
2995
cpufreq_driver = NULL;
2996
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2997
out:
2998
cpus_read_unlock();
2999
return ret;
3000
}
3001
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
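/*
 * Illustrative sketch only, not part of this file: the minimal shape that
 * passes the sanity checks above - ->init(), ->verify() and exactly one of
 * ->setpolicy() or ->target()/->target_index().  The "hypothetical_*" names
 * and the frequency table are assumptions; cpufreq_generic_frequency_table_verify()
 * is the stock ->verify() helper for table-based drivers.
 */
static struct cpufreq_frequency_table hypothetical_freq_table[] = {
        { .frequency =  600000 },
        { .frequency = 1200000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int hypothetical_init(struct cpufreq_policy *policy)
{
        policy->freq_table = hypothetical_freq_table;
        policy->cpuinfo.transition_latency = 300 * 1000;        /* 300 us, in ns */
        return 0;
}

static int hypothetical_target_index(struct cpufreq_policy *policy,
                                     unsigned int index)
{
        /* Assumed helper that programs the hardware for the selected entry. */
        return hypothetical_set_opp(policy->cpu,
                                    policy->freq_table[index].frequency);
}

static struct cpufreq_driver hypothetical_cpufreq_driver = {
        .name           = "hypothetical",
        .flags          = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
        .init           = hypothetical_init,
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = hypothetical_target_index,
};

static int __init hypothetical_cpufreq_register(void)
{
        /* -EEXIST if another driver won the race, -EPROBE_DEFER very early. */
        return cpufreq_register_driver(&hypothetical_cpufreq_driver);
}
module_init(hypothetical_cpufreq_register);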
3002
3003
/*
3004
* cpufreq_unregister_driver - unregister the current CPUFreq driver
3005
*
3006
* Unregister the current CPUFreq driver. Only call this if you have
3007
* the right to do so, i.e. if you have succeeded in initialising it before.
3008
* If @driver is not the currently registered driver, a warning is emitted
3009
* and the call is ignored.
3010
*/
3011
void cpufreq_unregister_driver(struct cpufreq_driver *driver)
3012
{
3013
unsigned long flags;
3014
3015
if (WARN_ON(!cpufreq_driver || (driver != cpufreq_driver)))
3016
return;
3017
3018
pr_debug("unregistering driver %s\n", driver->name);
3019
3020
/* Protect against concurrent cpu hotplug */
3021
cpus_read_lock();
3022
subsys_interface_unregister(&cpufreq_interface);
3023
remove_boost_sysfs_file();
3024
static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
3025
cpuhp_remove_state_nocalls_cpuslocked(hp_online);
3026
3027
write_lock_irqsave(&cpufreq_driver_lock, flags);
3028
3029
cpufreq_driver = NULL;
3030
3031
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
3032
cpus_read_unlock();
3033
}
3034
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
3035
3036
static int __init cpufreq_core_init(void)
3037
{
3038
struct cpufreq_governor *gov = cpufreq_default_governor();
3039
struct device *dev_root;
3040
3041
if (cpufreq_disabled())
3042
return -ENODEV;
3043
3044
dev_root = bus_get_dev_root(&cpu_subsys);
3045
if (dev_root) {
3046
cpufreq_global_kobject = kobject_create_and_add("cpufreq", &dev_root->kobj);
3047
put_device(dev_root);
3048
}
3049
BUG_ON(!cpufreq_global_kobject);
3050
3051
if (!strlen(default_governor))
3052
strscpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
3053
3054
return 0;
3055
}
3056
3057
static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
3058
{
3059
struct cpufreq_policy *policy __free(put_cpufreq_policy);
3060
3061
policy = cpufreq_cpu_get(cpu);
3062
if (!policy) {
3063
pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
3064
return false;
3065
}
3066
3067
return sugov_is_governor(policy);
3068
}
3069
3070
bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask)
3071
{
3072
unsigned int cpu;
3073
3074
/* Do not attempt EAS if schedutil is not being used. */
3075
for_each_cpu(cpu, cpu_mask) {
3076
if (!cpufreq_policy_is_good_for_eas(cpu)) {
3077
pr_debug("rd %*pbl: schedutil is mandatory for EAS\n",
3078
cpumask_pr_args(cpu_mask));
3079
return false;
3080
}
3081
}
3082
3083
return true;
3084
}
3085
3086
module_param(off, int, 0444);
3087
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
3088
core_initcall(cpufreq_core_init);
3089
3090