GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/kern/kern_cpu.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004-2007 Nate Lawson (SDG)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/timetc.h>
#include <sys/taskqueue.h>

#include "cpufreq_if.h"

/*
 * Common CPU frequency glue code. Drivers for specific hardware can
 * attach this interface to allow users to get/set the CPU frequency.
 */

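/*
 * Hardware backends (e.g. est(4), hwpstate, acpi_perf or p4tcc) attach below
 * the cpuN devices and call cpufreq_register() below; the resulting
 * dev.cpu.N.freq and dev.cpu.N.freq_levels sysctls are what userland tools
 * such as powerd(8) use.  A rough usage sketch (values illustrative only):
 *
 *	# sysctl dev.cpu.0.freq_levels
 *	# sysctl dev.cpu.0.freq=1200
 */
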
/*
 * Number of levels we can handle. Levels are synthesized from settings
 * so for M settings and N drivers, there may be M*N levels.
 */
#define CF_MAX_LEVELS	256

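/*
 * As a rough, illustrative example of the M*N growth mentioned above: an
 * absolute driver exposing 8 settings combined with a relative driver
 * exposing 8 settings could expand to as many as 64 synthesized levels,
 * which is why the cap is intentionally generous.
 */
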
struct cf_saved_freq {
	struct cf_level level;
	int priority;
	SLIST_ENTRY(cf_saved_freq) link;
};

struct cpufreq_softc {
	struct sx lock;
	struct cf_level curr_level;
	int curr_priority;
	SLIST_HEAD(, cf_saved_freq) saved_freq;
	struct cf_level_lst all_levels;
	int all_count;
	int max_mhz;
	device_t dev;
	device_t cf_drv_dev;
	struct sysctl_ctx_list sysctl_ctx;
	struct task startup_task;
	struct cf_level *levels_buf;
};

struct cf_setting_array {
	struct cf_setting sets[MAX_SETTINGS];
	int count;
	TAILQ_ENTRY(cf_setting_array) link;
};

TAILQ_HEAD(cf_setting_lst, cf_setting_array);

#define CF_MTX_INIT(x)		sx_init((x), "cpufreq lock")
#define CF_MTX_LOCK(x)		sx_xlock((x))
#define CF_MTX_UNLOCK(x)	sx_xunlock((x))
#define CF_MTX_ASSERT(x)	sx_assert((x), SX_XLOCKED)

#define CF_DEBUG(msg...)	do {		\
	if (cf_verbose)				\
		printf("cpufreq: " msg);	\
} while (0)

static int cpufreq_probe(device_t dev);
static int cpufreq_attach(device_t dev);
static void cpufreq_startup_task(void *ctx, int pending);
static int cpufreq_detach(device_t dev);
static int cf_set_method(device_t dev, const struct cf_level *level,
    int priority);
static int cf_get_method(device_t dev, struct cf_level *level);
static int cf_levels_method(device_t dev, struct cf_level *levels,
    int *count);
static int cpufreq_insert_abs(struct cpufreq_softc *sc,
    struct cf_setting *sets, int count);
static int cpufreq_expand_set(struct cpufreq_softc *sc,
    struct cf_setting_array *set_arr);
static struct cf_level *cpufreq_dup_set(struct cpufreq_softc *sc,
    struct cf_level *dup, struct cf_setting *set);
static int cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS);
static int cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS);
static int cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t cpufreq_methods[] = {
	DEVMETHOD(device_probe, cpufreq_probe),
	DEVMETHOD(device_attach, cpufreq_attach),
	DEVMETHOD(device_detach, cpufreq_detach),

	DEVMETHOD(cpufreq_set, cf_set_method),
	DEVMETHOD(cpufreq_get, cf_get_method),
	DEVMETHOD(cpufreq_levels, cf_levels_method),
	{0, 0}
};

static driver_t cpufreq_driver = {
	"cpufreq", cpufreq_methods, sizeof(struct cpufreq_softc)
};

DRIVER_MODULE(cpufreq, cpu, cpufreq_driver, 0, 0);

static int cf_lowest_freq;
static int cf_verbose;
static SYSCTL_NODE(_debug, OID_AUTO, cpufreq, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "cpufreq debugging");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, lowest, CTLFLAG_RWTUN, &cf_lowest_freq, 1,
    "Don't provide levels below this frequency.");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, verbose, CTLFLAG_RWTUN, &cf_verbose, 1,
    "Print verbose debugging messages");

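/*
 * Both knobs above are CTLFLAG_RWTUN, so they can be set as loader tunables
 * or at runtime, for example (values illustrative only):
 *
 *	# sysctl debug.cpufreq.verbose=1
 *	# echo 'debug.cpufreq.lowest="800"' >> /boot/loader.conf
 */
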
static int
cpufreq_probe(device_t dev)
{
	device_set_desc(dev, "CPU frequency control");
	return (BUS_PROBE_DEFAULT);
}

/*
 * This is called as the result of a hardware-specific frequency control
 * driver calling cpufreq_register. It provides a general interface for
 * system-wide frequency controls and operates on a per-CPU basis.
 */
static int
cpufreq_attach(device_t dev)
{
	struct cpufreq_softc *sc;
	struct pcpu *pc;
	device_t parent;
	uint64_t rate;

	CF_DEBUG("initializing %s\n", device_get_nameunit(dev));
	sc = device_get_softc(dev);
	parent = device_get_parent(dev);
	sc->dev = dev;
	sysctl_ctx_init(&sc->sysctl_ctx);
	TAILQ_INIT(&sc->all_levels);
	CF_MTX_INIT(&sc->lock);
	sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
	SLIST_INIT(&sc->saved_freq);
	/* Try to get nominal CPU freq to use it as maximum later if needed */
	sc->max_mhz = cpu_get_nominal_mhz(dev);
	/* If that fails, try to measure the current rate */
	if (sc->max_mhz <= 0) {
		CF_DEBUG("Unable to obtain nominal frequency.\n");
		pc = cpu_get_pcpu(dev);
		if (cpu_est_clockrate(pc->pc_cpuid, &rate) == 0)
			sc->max_mhz = rate / 1000000;
		else
			sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
	}

	CF_DEBUG("initializing one-time data for %s\n",
	    device_get_nameunit(dev));
	sc->levels_buf = malloc(CF_MAX_LEVELS * sizeof(*sc->levels_buf),
	    M_DEVBUF, M_WAITOK);
	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
	    OID_AUTO, "freq", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, cpufreq_curr_sysctl, "I", "Current CPU frequency");
	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
	    OID_AUTO, "freq_levels",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
	    cpufreq_levels_sysctl, "A", "CPU frequency levels");

	/*
	 * Queue a one-shot broadcast that levels have changed.
	 * It will run once the system has completed booting.
	 */
	TASK_INIT(&sc->startup_task, 0, cpufreq_startup_task, dev);
	taskqueue_enqueue(taskqueue_thread, &sc->startup_task);

	return (0);
}

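/*
 * The attach routine above hangs the user-visible sysctls off the parent
 * cpuN node, so they appear as dev.cpu.N.freq (read/write, MHz) and
 * dev.cpu.N.freq_levels (read-only "frequency/power" pairs, with power
 * typically in mW or -1 if unknown).  Example output, values illustrative
 * only:
 *
 *	dev.cpu.0.freq_levels: 2400/35000 2000/28000 1600/21000
 */
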
/* Handle any work to be done for all drivers that attached during boot. */
static void
cpufreq_startup_task(void *ctx, int pending)
{

	cpufreq_settings_changed((device_t)ctx);
}

static int
cpufreq_detach(device_t dev)
{
	struct cpufreq_softc *sc;
	struct cf_saved_freq *saved_freq;

	CF_DEBUG("shutdown %s\n", device_get_nameunit(dev));
	sc = device_get_softc(dev);
	sysctl_ctx_free(&sc->sysctl_ctx);

	while ((saved_freq = SLIST_FIRST(&sc->saved_freq)) != NULL) {
		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
		free(saved_freq, M_TEMP);
	}

	free(sc->levels_buf, M_DEVBUF);

	return (0);
}

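/*
 * Set the current level on behalf of a caller at the given priority
 * (CPUFREQ_PRIO_*).  Roughly: a request at a higher priority than the
 * current one saves the current level so that it can later be restored by
 * calling this method with a NULL level.  For example, a kernel consumer
 * clamping the CPU at CPUFREQ_PRIO_HIGHEST would cause the user's
 * CPUFREQ_PRIO_USER level (set via dev.cpu.N.freq) to be reinstated once
 * the clamp is released.  (Summary added for clarity; the code below is
 * authoritative.)
 */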
static int
cf_set_method(device_t dev, const struct cf_level *level, int priority)
{
	struct cpufreq_softc *sc;
	const struct cf_setting *set;
	struct cf_saved_freq *saved_freq, *curr_freq;
	struct pcpu *pc;
	int error, i;
	u_char pri;

	sc = device_get_softc(dev);
	error = 0;
	set = NULL;
	saved_freq = NULL;

	/* We are going to change levels so notify the pre-change handler. */
	EVENTHANDLER_INVOKE(cpufreq_pre_change, level, &error);
	if (error != 0) {
		EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
		return (error);
	}

	CF_MTX_LOCK(&sc->lock);

#ifdef SMP
#ifdef EARLY_AP_STARTUP
	MPASS(mp_ncpus == 1 || smp_started);
#else
	/*
	 * If still booting and secondary CPUs not started yet, don't allow
	 * changing the frequency until they're online. This is because we
	 * can't switch to them using sched_bind() and thus we'd only be
	 * switching the main CPU. XXXTODO: Need to think more about how to
	 * handle having different CPUs at different frequencies.
	 */
	if (mp_ncpus > 1 && !smp_started) {
		device_printf(dev, "rejecting change, SMP not started yet\n");
		error = ENXIO;
		goto out;
	}
#endif
#endif /* SMP */

	/*
	 * If the requested level has a lower priority, don't allow
	 * the new level right now.
	 */
	if (priority < sc->curr_priority) {
		CF_DEBUG("ignoring, curr prio %d less than %d\n", priority,
		    sc->curr_priority);
		error = EPERM;
		goto out;
	}

	/*
	 * If the caller didn't specify a level and one is saved, prepare to
	 * restore the saved level. If none has been saved, return an error.
	 */
	if (level == NULL) {
		saved_freq = SLIST_FIRST(&sc->saved_freq);
		if (saved_freq == NULL) {
			CF_DEBUG("NULL level, no saved level\n");
			error = ENXIO;
			goto out;
		}
		level = &saved_freq->level;
		priority = saved_freq->priority;
		CF_DEBUG("restoring saved level, freq %d prio %d\n",
		    level->total_set.freq, priority);
	}

	/* Reject levels that are below our specified threshold. */
	if (level->total_set.freq < cf_lowest_freq) {
		CF_DEBUG("rejecting freq %d, less than %d limit\n",
		    level->total_set.freq, cf_lowest_freq);
		error = EINVAL;
		goto out;
	}

	/* If already at this level, just return. */
	if (sc->curr_level.total_set.freq == level->total_set.freq) {
		CF_DEBUG("skipping freq %d, same as current level %d\n",
		    level->total_set.freq, sc->curr_level.total_set.freq);
		goto skip;
	}

	/* First, set the absolute frequency via its driver. */
	set = &level->abs_set;
	if (set->dev) {
		if (!device_is_attached(set->dev)) {
			error = ENXIO;
			goto out;
		}

		/* Bind to the target CPU before switching. */
		pc = cpu_get_pcpu(set->dev);

		/* Skip settings if CPU is not started. */
		if (pc == NULL) {
			error = 0;
			goto out;
		}
		thread_lock(curthread);
		pri = curthread->td_priority;
		sched_prio(curthread, PRI_MIN);
		sched_bind(curthread, pc->pc_cpuid);
		thread_unlock(curthread);
		CF_DEBUG("setting abs freq %d on %s (cpu %d)\n", set->freq,
		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
		error = CPUFREQ_DRV_SET(set->dev, set);
		thread_lock(curthread);
		sched_unbind(curthread);
		sched_prio(curthread, pri);
		thread_unlock(curthread);
		if (error) {
			goto out;
		}
	}

	/* Next, set any/all relative frequencies via their drivers. */
	for (i = 0; i < level->rel_count; i++) {
		set = &level->rel_set[i];
		if (!device_is_attached(set->dev)) {
			error = ENXIO;
			goto out;
		}

		/* Bind to the target CPU before switching. */
		pc = cpu_get_pcpu(set->dev);
		thread_lock(curthread);
		pri = curthread->td_priority;
		sched_prio(curthread, PRI_MIN);
		sched_bind(curthread, pc->pc_cpuid);
		thread_unlock(curthread);
		CF_DEBUG("setting rel freq %d on %s (cpu %d)\n", set->freq,
		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
		error = CPUFREQ_DRV_SET(set->dev, set);
		thread_lock(curthread);
		sched_unbind(curthread);
		sched_prio(curthread, pri);
		thread_unlock(curthread);
		if (error) {
			/* XXX Back out any successful setting? */
			goto out;
		}
	}

skip:
	/*
	 * Before recording the current level, check if we're going to a
	 * higher priority. If so, save the previous level and priority.
	 */
	if (sc->curr_level.total_set.freq != CPUFREQ_VAL_UNKNOWN &&
	    priority > sc->curr_priority) {
		CF_DEBUG("saving level, freq %d prio %d\n",
		    sc->curr_level.total_set.freq, sc->curr_priority);
		curr_freq = malloc(sizeof(*curr_freq), M_TEMP, M_NOWAIT);
		if (curr_freq == NULL) {
			error = ENOMEM;
			goto out;
		}
		curr_freq->level = sc->curr_level;
		curr_freq->priority = sc->curr_priority;
		SLIST_INSERT_HEAD(&sc->saved_freq, curr_freq, link);
	}
	sc->curr_level = *level;
	sc->curr_priority = priority;

	/* If we were restoring a saved state, reset it to "unused". */
	if (saved_freq != NULL) {
		CF_DEBUG("resetting saved level\n");
		sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
		free(saved_freq, M_TEMP);
	}

out:
	CF_MTX_UNLOCK(&sc->lock);

	/*
	 * We changed levels (or attempted to) so notify the post-change
	 * handler of new frequency or error.
	 */
	EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
	if (error && set)
		device_printf(set->dev, "set freq failed, err %d\n", error);

	return (error);
}

static int
cpufreq_get_frequency(device_t dev)
{
	struct cf_setting set;

	if (CPUFREQ_DRV_GET(dev, &set) != 0)
		return (-1);

	return (set.freq);
}

/* Returns the index into *levels with the match */
static int
cpufreq_get_level(device_t dev, struct cf_level *levels, int count)
{
	int i, freq;

	if ((freq = cpufreq_get_frequency(dev)) < 0)
		return (-1);
	for (i = 0; i < count; i++)
		if (freq == levels[i].total_set.freq)
			return (i);

	return (-1);
}

/*
 * Used by the cpufreq core, this function populates *level with the current
 * frequency. The value normally comes from the cached sc->curr_level; if the
 * lower-level driver has set the CPUFREQ_FLAG_UNCACHED flag, the frequency
 * is instead obtained from the driver itself.
 */
static int
cf_get_method(device_t dev, struct cf_level *level)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	struct cf_setting *curr_set;
	struct pcpu *pc;
	int bdiff, count, diff, error, i, type;
	uint64_t rate;

	sc = device_get_softc(dev);
	error = 0;
	levels = NULL;

	/*
	 * If we already know the current frequency, and the driver didn't ask
	 * for uncached usage, we're done.
	 */
	CF_MTX_LOCK(&sc->lock);
	curr_set = &sc->curr_level.total_set;
	error = CPUFREQ_DRV_TYPE(sc->cf_drv_dev, &type);
	if (error == 0 && (type & CPUFREQ_FLAG_UNCACHED)) {
		struct cf_setting set;

		/*
		 * If the driver wants to always report back the real frequency,
		 * first try the driver and if that fails, fall back to
		 * estimating.
		 */
		if (CPUFREQ_DRV_GET(sc->cf_drv_dev, &set) == 0) {
			sc->curr_level.total_set = set;
			CF_DEBUG("get returning immediate freq %d\n",
			    curr_set->freq);
			goto out;
		}
	} else if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
		CF_DEBUG("get returning known freq %d\n", curr_set->freq);
		error = 0;
		goto out;
	}
	CF_MTX_UNLOCK(&sc->lock);

	/*
	 * We need to figure out the current level. Loop through every
	 * driver, getting the current setting. Then, attempt to get a best
	 * match of settings against each level.
	 */
	count = CF_MAX_LEVELS;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return (ENOMEM);
	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
	if (error) {
		if (error == E2BIG)
			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
		free(levels, M_TEMP);
		return (error);
	}

	/*
	 * Reacquire the lock and search for the given level.
	 *
	 * XXX Note: this is not quite right since we really need to go
	 * through each level and compare both absolute and relative
	 * settings for each driver in the system before making a match.
	 * The estimation code below catches this case though.
	 */
	CF_MTX_LOCK(&sc->lock);
	i = cpufreq_get_level(sc->cf_drv_dev, levels, count);
	if (i >= 0)
		sc->curr_level = levels[i];
	else
		CF_DEBUG("Couldn't find supported level for %s\n",
		    device_get_nameunit(sc->cf_drv_dev));

	if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
		CF_DEBUG("get matched freq %d from drivers\n", curr_set->freq);
		goto out;
	}

	/*
	 * We couldn't find an exact match, so attempt to estimate and then
	 * match against a level.
	 */
	pc = cpu_get_pcpu(dev);
	if (pc == NULL) {
		error = ENXIO;
		goto out;
	}
	cpu_est_clockrate(pc->pc_cpuid, &rate);
	rate /= 1000000;
	bdiff = 1 << 30;
	for (i = 0; i < count; i++) {
		diff = abs(levels[i].total_set.freq - rate);
		if (diff < bdiff) {
			bdiff = diff;
			sc->curr_level = levels[i];
		}
	}
	CF_DEBUG("get estimated freq %d\n", curr_set->freq);

out:
	if (error == 0)
		*level = sc->curr_level;

	CF_MTX_UNLOCK(&sc->lock);
	if (levels)
		free(levels, M_TEMP);
	return (error);
}

/*
 * Either directly obtain settings from the cpufreq driver, or build a list of
 * relative settings to be integrated later against an absolute max.
 */
static int
cpufreq_add_levels(device_t cf_dev, struct cf_setting_lst *rel_sets)
{
	struct cf_setting_array *set_arr;
	struct cf_setting *sets;
	device_t dev;
	struct cpufreq_softc *sc;
	int type, set_count, error;

	sc = device_get_softc(cf_dev);
	dev = sc->cf_drv_dev;

	/* Skip devices that aren't ready. */
	if (!device_is_attached(cf_dev))
		return (0);

	/*
	 * Get settings, skipping drivers that offer no settings or
	 * provide settings for informational purposes only.
	 */
	error = CPUFREQ_DRV_TYPE(dev, &type);
	if (error != 0 || (type & CPUFREQ_FLAG_INFO_ONLY)) {
		if (error == 0) {
			CF_DEBUG("skipping info-only driver %s\n",
			    device_get_nameunit(cf_dev));
		}
		return (error);
	}

	sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT);
	if (sets == NULL)
		return (ENOMEM);

	set_count = MAX_SETTINGS;
	error = CPUFREQ_DRV_SETTINGS(dev, sets, &set_count);
	if (error != 0 || set_count == 0)
		goto out;

	/* Add the settings to our absolute/relative lists. */
	switch (type & CPUFREQ_TYPE_MASK) {
	case CPUFREQ_TYPE_ABSOLUTE:
		error = cpufreq_insert_abs(sc, sets, set_count);
		break;
	case CPUFREQ_TYPE_RELATIVE:
		CF_DEBUG("adding %d relative settings\n", set_count);
		set_arr = malloc(sizeof(*set_arr), M_TEMP, M_NOWAIT);
		if (set_arr == NULL) {
			error = ENOMEM;
			goto out;
		}
		bcopy(sets, set_arr->sets, set_count * sizeof(*sets));
		set_arr->count = set_count;
		TAILQ_INSERT_TAIL(rel_sets, set_arr, link);
		break;
	default:
		error = EINVAL;
	}

out:
	free(sets, M_TEMP);
	return (error);
}

static int
cf_levels_method(device_t dev, struct cf_level *levels, int *count)
{
	struct cf_setting_array *set_arr;
	struct cf_setting_lst rel_sets;
	struct cpufreq_softc *sc;
	struct cf_level *lev;
	struct pcpu *pc;
	int error, i;
	uint64_t rate;

	if (levels == NULL || count == NULL)
		return (EINVAL);

	TAILQ_INIT(&rel_sets);
	sc = device_get_softc(dev);

	CF_MTX_LOCK(&sc->lock);
	error = cpufreq_add_levels(sc->dev, &rel_sets);
	if (error)
		goto out;

	/*
	 * If there are no absolute levels, create a fake one at 100%. We
	 * then cache the clockrate for later use as our base frequency.
	 */
	if (TAILQ_EMPTY(&sc->all_levels)) {
		struct cf_setting set;

		CF_DEBUG("No absolute levels returned by driver\n");

		if (sc->max_mhz == CPUFREQ_VAL_UNKNOWN) {
			sc->max_mhz = cpu_get_nominal_mhz(dev);
			/*
			 * If the CPU can't report a rate for 100%, hope
			 * the CPU is running at its nominal rate right now,
			 * and use that instead.
			 */
			if (sc->max_mhz <= 0) {
				pc = cpu_get_pcpu(dev);
				cpu_est_clockrate(pc->pc_cpuid, &rate);
				sc->max_mhz = rate / 1000000;
			}
		}
		memset(&set, CPUFREQ_VAL_UNKNOWN, sizeof(set));
		set.freq = sc->max_mhz;
		set.dev = NULL;
		error = cpufreq_insert_abs(sc, &set, 1);
		if (error)
			goto out;
	}

	/* Create a combined list of absolute + relative levels. */
	TAILQ_FOREACH(set_arr, &rel_sets, link)
		cpufreq_expand_set(sc, set_arr);

	/* If the caller doesn't have enough space, return the actual count. */
	if (sc->all_count > *count) {
		*count = sc->all_count;
		error = E2BIG;
		goto out;
	}

	/* Finally, output the list of levels. */
	i = 0;
	TAILQ_FOREACH(lev, &sc->all_levels, link) {
		/* Skip levels that have a frequency that is too low. */
		if (lev->total_set.freq < cf_lowest_freq) {
			sc->all_count--;
			continue;
		}

		levels[i] = *lev;
		i++;
	}
	*count = sc->all_count;
	error = 0;

out:
	/* Clear all levels since we regenerate them each time. */
	while ((lev = TAILQ_FIRST(&sc->all_levels)) != NULL) {
		TAILQ_REMOVE(&sc->all_levels, lev, link);
		free(lev, M_TEMP);
	}
	sc->all_count = 0;

	CF_MTX_UNLOCK(&sc->lock);
	while ((set_arr = TAILQ_FIRST(&rel_sets)) != NULL) {
		TAILQ_REMOVE(&rel_sets, set_arr, link);
		free(set_arr, M_TEMP);
	}
	return (error);
}

/*
 * Create levels for an array of absolute settings and insert them in
 * sorted order in the specified list.
 */
static int
cpufreq_insert_abs(struct cpufreq_softc *sc, struct cf_setting *sets,
    int count)
{
	struct cf_level_lst *list;
	struct cf_level *level, *search;
	int i, inserted;

	CF_MTX_ASSERT(&sc->lock);

	list = &sc->all_levels;
	for (i = 0; i < count; i++) {
		level = malloc(sizeof(*level), M_TEMP, M_NOWAIT | M_ZERO);
		if (level == NULL)
			return (ENOMEM);
		level->abs_set = sets[i];
		level->total_set = sets[i];
		level->total_set.dev = NULL;
		sc->all_count++;
		inserted = 0;

		if (TAILQ_EMPTY(list)) {
			CF_DEBUG("adding abs setting %d at head\n",
			    sets[i].freq);
			TAILQ_INSERT_HEAD(list, level, link);
			continue;
		}

		TAILQ_FOREACH_REVERSE(search, list, cf_level_lst, link)
			if (sets[i].freq <= search->total_set.freq) {
				CF_DEBUG("adding abs setting %d after %d\n",
				    sets[i].freq, search->total_set.freq);
				TAILQ_INSERT_AFTER(list, search, level, link);
				inserted = 1;
				break;
			}

		if (inserted == 0) {
			TAILQ_FOREACH(search, list, link)
				if (sets[i].freq >= search->total_set.freq) {
					CF_DEBUG("adding abs setting %d before %d\n",
					    sets[i].freq, search->total_set.freq);
					TAILQ_INSERT_BEFORE(search, level, link);
					break;
				}
		}
	}

	return (0);
}

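/*
 * The list built above is kept sorted from highest to lowest frequency
 * (head to tail).  For instance, inserting absolute settings of 1000, 1800
 * and 1400 MHz, in that order, should leave the list as 1800, 1400, 1000.
 * (Illustrative values only.)
 */
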
/*
 * Expand a group of relative settings, creating derived levels from them.
 */
static int
cpufreq_expand_set(struct cpufreq_softc *sc, struct cf_setting_array *set_arr)
{
	struct cf_level *fill, *search;
	struct cf_setting *set;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	/*
	 * Walk the set of all existing levels in reverse. This is so we
	 * create derived states from the lowest absolute settings first
	 * and discard duplicates created from higher absolute settings.
	 * For instance, a level of 50 MHz derived from 100 MHz + 50% is
	 * preferable to 200 MHz + 25% because absolute settings are more
	 * efficient since they often change the voltage as well.
	 */
	TAILQ_FOREACH_REVERSE(search, &sc->all_levels, cf_level_lst, link) {
		/* Add each setting to the level, duplicating if necessary. */
		for (i = 0; i < set_arr->count; i++) {
			set = &set_arr->sets[i];

			/*
			 * If this setting is less than 100%, split the level
			 * into two and add this setting to the new level.
			 */
			fill = search;
			if (set->freq < 10000) {
				fill = cpufreq_dup_set(sc, search, set);

				/*
				 * The new level was a duplicate of an existing
				 * level or its absolute setting is too high
				 * so we freed it. For example, we discard a
				 * derived level of 1000 MHz/25% if a level
				 * of 500 MHz/100% already exists.
				 */
				if (fill == NULL)
					break;
			}

			/* Add this setting to the existing or new level. */
			KASSERT(fill->rel_count < MAX_SETTINGS,
			    ("cpufreq: too many relative drivers (%d)",
			    MAX_SETTINGS));
			fill->rel_set[fill->rel_count] = *set;
			fill->rel_count++;
			CF_DEBUG(
			"expand set added rel setting %d%% to %d level\n",
			    set->freq / 100, fill->total_set.freq);
		}
	}

	return (0);
}

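/*
 * Note that relative settings are expressed in hundredths of a percent
 * (10000 == 100%), which is why the expansion above treats anything below
 * 10000 as a split point.  As a rough example, an absolute 2000 MHz level
 * combined with relative settings of 10000, 7500 and 5000 would yield
 * levels of 2000, 1500 and 1000 MHz respectively (the 100% case reuses the
 * absolute level itself).  Values are illustrative only.
 */
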
static struct cf_level *
cpufreq_dup_set(struct cpufreq_softc *sc, struct cf_level *dup,
    struct cf_setting *set)
{
	struct cf_level_lst *list;
	struct cf_level *fill, *itr;
	struct cf_setting *fill_set, *itr_set;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	/*
	 * Create a new level, copy it from the old one, and update the
	 * total frequency and power by the percentage specified in the
	 * relative setting.
	 */
	fill = malloc(sizeof(*fill), M_TEMP, M_NOWAIT);
	if (fill == NULL)
		return (NULL);
	*fill = *dup;
	fill_set = &fill->total_set;
	fill_set->freq =
	    ((uint64_t)fill_set->freq * set->freq) / 10000;
	if (fill_set->power != CPUFREQ_VAL_UNKNOWN) {
		fill_set->power = ((uint64_t)fill_set->power * set->freq)
		    / 10000;
	}
	if (set->lat != CPUFREQ_VAL_UNKNOWN) {
		if (fill_set->lat != CPUFREQ_VAL_UNKNOWN)
			fill_set->lat += set->lat;
		else
			fill_set->lat = set->lat;
	}
	CF_DEBUG("dup set considering derived setting %d\n", fill_set->freq);

	/*
	 * If we copied an old level that we already modified (say, at 100%),
	 * we need to remove that setting before adding this one. Since we
	 * process each setting array in order, we know any settings for this
	 * driver will be found at the end.
	 */
	for (i = fill->rel_count; i != 0; i--) {
		if (fill->rel_set[i - 1].dev != set->dev)
			break;
		CF_DEBUG("removed last relative driver: %s\n",
		    device_get_nameunit(set->dev));
		fill->rel_count--;
	}

	/*
	 * Insert the new level in sorted order. If it is a duplicate of an
	 * existing level (1) or has an absolute setting higher than the
	 * existing level (2), do not add it. We can do this since any such
	 * level is guaranteed to use less power. For example (1), a level
	 * with one absolute setting of 800 MHz uses less power than one
	 * composed of an absolute setting of 1600 MHz and a relative setting
	 * at 50%. Also for example (2), a level of 800 MHz/75% is preferable
	 * to 1600 MHz/25% even though the latter has a lower total frequency.
	 */
	list = &sc->all_levels;
	KASSERT(!TAILQ_EMPTY(list), ("all levels list empty in dup set"));
	TAILQ_FOREACH_REVERSE(itr, list, cf_level_lst, link) {
		itr_set = &itr->total_set;
		if (CPUFREQ_CMP(fill_set->freq, itr_set->freq)) {
			CF_DEBUG("dup set rejecting %d (dupe)\n",
			    fill_set->freq);
			itr = NULL;
			break;
		} else if (fill_set->freq < itr_set->freq) {
			if (fill->abs_set.freq <= itr->abs_set.freq) {
				CF_DEBUG(
			"dup done, inserting new level %d after %d\n",
				    fill_set->freq, itr_set->freq);
				TAILQ_INSERT_AFTER(list, itr, fill, link);
				sc->all_count++;
			} else {
				CF_DEBUG("dup set rejecting %d (abs too big)\n",
				    fill_set->freq);
				itr = NULL;
			}
			break;
		}
	}

	/* We didn't find a good place for this new level so free it. */
	if (itr == NULL) {
		CF_DEBUG("dup set freeing new level %d (not optimal)\n",
		    fill_set->freq);
		free(fill, M_TEMP);
		fill = NULL;
	}

	return (fill);
}

static int
cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	int best, count, diff, bdiff, devcount, error, freq, i, n;
	device_t *devs;

	devs = NULL;
	sc = oidp->oid_arg1;
	levels = sc->levels_buf;

	error = CPUFREQ_GET(sc->dev, &levels[0]);
	if (error)
		goto out;
	freq = levels[0].total_set.freq;
	error = sysctl_handle_int(oidp, &freq, 0, req);
	if (error != 0 || req->newptr == NULL)
		goto out;

	/*
	 * While we only call cpufreq_get() on one device (assuming all
	 * CPUs have equal levels), we call cpufreq_set() on all CPUs.
	 * This is needed for some MP systems.
	 */
	error = devclass_get_devices(devclass_find("cpufreq"), &devs, &devcount);
	if (error)
		goto out;
	for (n = 0; n < devcount; n++) {
		count = CF_MAX_LEVELS;
		error = CPUFREQ_LEVELS(devs[n], levels, &count);
		if (error) {
			if (error == E2BIG)
				printf(
			"cpufreq: need to increase CF_MAX_LEVELS\n");
			break;
		}
		best = 0;
		bdiff = 1 << 30;
		for (i = 0; i < count; i++) {
			diff = abs(levels[i].total_set.freq - freq);
			if (diff < bdiff) {
				bdiff = diff;
				best = i;
			}
		}
		error = CPUFREQ_SET(devs[n], &levels[best], CPUFREQ_PRIO_USER);
	}

out:
	if (devs)
		free(devs, M_TEMP);
	return (error);
}

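/*
 * The handler above backs dev.cpu.N.freq.  On a write it picks, for every
 * cpufreq device in the system, the available level closest to the
 * requested frequency and applies it at CPUFREQ_PRIO_USER.  For example
 * (values illustrative only):
 *
 *	# sysctl dev.cpu.0.freq=1700
 *
 * may settle on a 1600 MHz level if that is the nearest match.
 */
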
static int
cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	struct cf_setting *set;
	struct sbuf sb;
	int count, error, i;

	sc = oidp->oid_arg1;
	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

	/* Get settings from the device and generate the output string. */
	count = CF_MAX_LEVELS;
	levels = sc->levels_buf;
	if (levels == NULL) {
		sbuf_delete(&sb);
		return (ENOMEM);
	}
	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
	if (error) {
		if (error == E2BIG)
			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
		goto out;
	}
	if (count) {
		for (i = 0; i < count; i++) {
			set = &levels[i].total_set;
			sbuf_printf(&sb, "%d/%d ", set->freq, set->power);
		}
	} else
		sbuf_cpy(&sb, "0");
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
	sbuf_delete(&sb);
	return (error);
}

static int
cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS)
{
	device_t dev;
	struct cf_setting *sets;
	struct sbuf sb;
	int error, i, set_count;

	dev = oidp->oid_arg1;
	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

	/* Get settings from the device and generate the output string. */
	set_count = MAX_SETTINGS;
	sets = malloc(set_count * sizeof(*sets), M_TEMP, M_NOWAIT);
	if (sets == NULL) {
		sbuf_delete(&sb);
		return (ENOMEM);
	}
	error = CPUFREQ_DRV_SETTINGS(dev, sets, &set_count);
	if (error)
		goto out;
	if (set_count) {
		for (i = 0; i < set_count; i++)
			sbuf_printf(&sb, "%d/%d ", sets[i].freq, sets[i].power);
	} else
		sbuf_cpy(&sb, "0");
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
	free(sets, M_TEMP);
	sbuf_delete(&sb);
	return (error);
}

static void
cpufreq_add_freq_driver_sysctl(device_t cf_dev)
{
	struct cpufreq_softc *sc;

	sc = device_get_softc(cf_dev);
	SYSCTL_ADD_CONST_STRING(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(cf_dev)), OID_AUTO,
	    "freq_driver", CTLFLAG_RD, device_get_nameunit(sc->cf_drv_dev),
	    "cpufreq driver used by this cpu");
}

int
cpufreq_register(device_t dev)
{
	struct cpufreq_softc *sc;
	device_t cf_dev, cpu_dev;
	int error;

	/* Add a sysctl to get each driver's settings separately. */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "freq_settings",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, dev, 0,
	    cpufreq_settings_sysctl, "A", "CPU frequency driver settings");

	/*
	 * Add only one cpufreq device to each CPU. Currently, all CPUs
	 * must offer the same levels and be switched at the same time.
	 */
	cpu_dev = device_get_parent(dev);
	if ((cf_dev = device_find_child(cpu_dev, "cpufreq", DEVICE_UNIT_ANY))) {
		sc = device_get_softc(cf_dev);
		sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
		MPASS(sc->cf_drv_dev != NULL);
		return (0);
	}

	/* Add the child device and possibly sysctls. */
	cf_dev = BUS_ADD_CHILD(cpu_dev, 0, "cpufreq", device_get_unit(cpu_dev));
	if (cf_dev == NULL)
		return (ENOMEM);
	device_quiet(cf_dev);

	error = device_probe_and_attach(cf_dev);
	if (error)
		return (error);

	sc = device_get_softc(cf_dev);
	sc->cf_drv_dev = dev;
	cpufreq_add_freq_driver_sysctl(cf_dev);
	return (error);
}

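/*
 * Roughly, a hardware backend ties into this framework by implementing the
 * cpufreq_if.m driver methods and calling cpufreq_register() from its
 * attach routine.  Illustrative sketch only; "foo" is a made-up driver and
 * the method list is abbreviated (see cpufreq_if.m for the real
 * signatures):
 *
 *	static device_method_t foo_methods[] = {
 *		DEVMETHOD(device_probe, foo_probe),
 *		DEVMETHOD(device_attach, foo_attach),
 *		DEVMETHOD(cpufreq_drv_set, foo_set),
 *		DEVMETHOD(cpufreq_drv_get, foo_get),
 *		DEVMETHOD(cpufreq_drv_type, foo_type),
 *		DEVMETHOD(cpufreq_drv_settings, foo_settings),
 *		DEVMETHOD_END
 *	};
 *
 *	static int
 *	foo_attach(device_t dev)
 *	{
 *		... program the hardware ...
 *		cpufreq_register(dev);
 *		return (0);
 *	}
 */
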
int
cpufreq_unregister(device_t dev)
{
	device_t cf_dev;
	struct cpufreq_softc *sc __diagused;

	/*
	 * If this is the last cpufreq child device, remove the control
	 * device as well. We identify cpufreq children by calling a method
	 * they support.
	 */
	cf_dev = device_find_child(device_get_parent(dev), "cpufreq",
	    DEVICE_UNIT_ANY);
	if (cf_dev == NULL) {
		device_printf(dev,
		    "warning: cpufreq_unregister called with no cpufreq device active\n");
		return (0);
	}
	sc = device_get_softc(cf_dev);
	MPASS(sc->cf_drv_dev == dev);
	device_delete_child(device_get_parent(cf_dev), cf_dev);

	return (0);
}

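/*
 * Called by hardware backends whenever their set of available settings
 * changes (for example, after a platform event that alters the usable
 * P-states), so that anyone subscribed to the cpufreq_levels_changed
 * eventhandler can re-query the levels for the affected CPU.
 */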
int
cpufreq_settings_changed(device_t dev)
{

	EVENTHANDLER_INVOKE(cpufreq_levels_changed,
	    device_get_unit(device_get_parent(dev)));
	return (0);
}