GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/cpuidle/governors/menu.c
/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <[email protected]>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <[email protected]>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos_params.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>

#define BUCKETS 12
#define INTERVALS 8
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400
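
/*
 * Editor's note (not part of the original file): a summary of how the
 * constants above are used by the code below.
 * BUCKETS:	12 correction factors: 6 duration ranges, times 2 for
 *		"IO pending" vs "no IO pending" (see which_bucket()).
 * INTERVALS:	length of the recent-interval history examined by
 *		detect_repeating_patterns().
 * RESOLUTION, DECAY: fixed-point scale and decay rate of the running
 *		average in correction_factor[]; RESOLUTION * DECAY (8192)
 *		represents a ratio of 1.0.
 * MAX_INTERESTING: measured sleeps of 50000 us or more are counted as
 *		perfect predictions (no further power gain).
 * STDDEV_THRESH: variance threshold, in us^2, below which the interval
 *		history is treated as a repeating pattern.
 */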

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from the PM QoS infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, but it keeps a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual factor is whether there is (disk) IO outstanding
 * or not.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors, which
 * gets indexed based on the magnitude of the expected duration as well as
 * the "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: we track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as the prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance multiplier:
 * if the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection because its performance impact would be too high. So the
 * higher this multiplier is, the longer we need to be idle to pick a deep
 * C state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * a value of 20 is added for each point of "per-CPU load average" we have;
 * a value of 10 is added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined and match the arithmetic
 * in performance_multiplier() below)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a CPU-local, instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 *
 */
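
/*
 * Editor's sketch (not part of the original file): a worked example of
 * the fixed-point prediction described above, using hypothetical values.
 * A correction factor of RESOLUTION * DECAY (8192) is a ratio of 1.0;
 * 4096 is a ratio of 0.5. Compiled out; illustration only.
 */
#if 0
static unsigned int example_prediction(void)
{
	u64 expected_us = 500;	/* next timer event in 500 us */
	u64 factor = 4096;	/* history says we wake at 50% of that */

	/* 500 * 4096 / 8192 == 250 us predicted idle time */
	return expected_us * factor / (RESOLUTION * DECAY);
}
#endif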

struct menu_device {
	int		last_state_idx;
	int		needs_update;

	unsigned int	expected_us;
	u64		predicted_us;
	unsigned int	exit_us;
	unsigned int	bucket;
	u64		correction_factor[BUCKETS];
	u32		intervals[INTERVALS];
	int		interval_ptr;
};

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static int get_loadavg(void)
{
	unsigned long this = this_cpu_load();

	return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
}
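
/*
 * Editor's note (not part of the original file): LOAD_INT()/LOAD_FRAC()
 * split the kernel's fixed-point load average into its integer part and
 * its fractional part in hundredths, so get_loadavg() returns the load
 * average in tenths: a load of 1.50 yields 1 * 10 + 50 / 10 = 15.
 */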

static inline int which_bucket(unsigned int duration)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending, one
	 * without. This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowait_cpu(smp_processor_id()))
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}
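
/*
 * Editor's worked example (not part of the original file): an expected
 * duration of 2500 us falls in the "< 10000" range, so it lands in
 * bucket 3 with no IO outstanding, or bucket BUCKETS/2 + 3 = 9 if this
 * CPU has tasks in iowait. Hypothetical numbers.
 */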

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(void)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */
	mult += 2 * get_loadavg();

	/* for IO wait tasks (per cpu!) we add 10x each */
	mult += 10 * nr_iowait_cpu(smp_processor_id());

	return mult;
}
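
/*
 * Editor's worked example (not part of the original file): with a load
 * average of 1.50 (get_loadavg() == 15) and two tasks in iowait on this
 * CPU, mult = 1 + 2 * 15 + 10 * 2 = 51. menu_select() below then skips
 * any state whose exit_latency * 51 exceeds predicted_us, so a state
 * with a 100 us exit latency needs at least 5100 us of predicted idle
 * time to be chosen. Hypothetical numbers.
 */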

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_device *dev);

/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
static u64 div_round64(u64 dividend, u32 divisor)
{
	return div_u64(dividend + (divisor / 2), divisor);
}
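
/*
 * Editor's note (not part of the original file): adding divisor / 2
 * before dividing rounds to nearest instead of truncating, e.g.
 * div_round64(10, 4) == (10 + 2) / 4 == 3 rather than 10 / 4 == 2.
 */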

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static void detect_repeating_patterns(struct menu_device *data)
{
	int i;
	uint64_t avg = 0;
	uint64_t stddev = 0; /* contains the square of the std deviation */

	/* first calculate average and standard deviation of the past */
	for (i = 0; i < INTERVALS; i++)
		avg += data->intervals[i];
	avg = avg / INTERVALS;

	/* if the avg is beyond the known next tick, it's worthless */
	if (avg > data->expected_us)
		return;

	for (i = 0; i < INTERVALS; i++)
		stddev += (data->intervals[i] - avg) *
			  (data->intervals[i] - avg);
	stddev = stddev / INTERVALS;

	/*
	 * now.. if stddev is small.. then assume we have a
	 * repeating pattern and predict we keep doing this.
	 */
	if (avg && stddev < STDDEV_THRESH)
		data->predicted_us = avg;
}
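
/*
 * Editor's worked example (not part of the original file): for the
 * hypothetical history { 96, 100, 104, 100, 96, 104, 100, 100 } the
 * average is 100 us and the variance is (4 * 16 + 4 * 0) / 8 = 8 us^2,
 * well below STDDEV_THRESH (400). predicted_us therefore becomes 100,
 * overriding the timer-based estimate, provided the 100 us average does
 * not exceed expected_us.
 */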

/**
 * menu_select - selects the next idle state to enter
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	unsigned int power_usage = -1;
	int i;
	int multiplier;
	struct timespec t;

	if (data->needs_update) {
		menu_update(dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

	/* Special case when user has set a very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * if the correction factor is 0 (e.g. first-time init or cpu
	 * hotplug), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
	data->predicted_us = div_round64(data->expected_us *
					 data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	detect_repeating_patterns(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
		struct cpuidle_state *s = &dev->states[i];

		if (s->flags & CPUIDLE_FLAG_IGNORE)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		if (s->power_usage < power_usage) {
			power_usage = s->power_usage;
			data->last_state_idx = i;
			data->exit_us = s->exit_latency;
		}
	}

	return data->last_state_idx;
}
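
/*
 * Editor's worked example (not part of the original file): suppose
 * predicted_us == 300, latency_req == 1000 and multiplier == 10. A
 * state with target_residency == 100 and exit_latency == 20 passes all
 * three filters (100 <= 300, 20 <= 1000, 20 * 10 <= 300) and is a
 * candidate; a deeper state with exit_latency == 50 is rejected because
 * 50 * 10 = 500 > 300. Among candidates, the lowest power_usage wins.
 * Hypothetical numbers.
 */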

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	data->needs_update = 1;
}

/**
 * menu_update - attempts to guess what happened after entry
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int last_idx = data->last_state_idx;
	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
	struct cpuidle_state *target = &dev->states[last_idx];
	unsigned int measured_us;
	u64 new_factor;

	/*
	 * Ugh, this idle state doesn't support residency measurements, so we
	 * are basically lost in the dark. As a compromise, assume we slept
	 * for the whole expected time.
	 */
	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
		last_idle_us = data->expected_us;

	measured_us = last_idle_us;

	/*
	 * We correct for the exit latency; we are assuming here that the
	 * exit latency happens after the event that we're interested in.
	 */
	if (measured_us > data->exit_us)
		measured_us -= data->exit_us;

	/* update our correction ratio */
	new_factor = data->correction_factor[data->bucket]
			* (DECAY - 1) / DECAY;

	if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->expected_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time.
	 */
	if (new_factor == 0)
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = last_idle_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}
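
/*
 * Editor's worked example (not part of the original file): the
 * correction factor is a decaying average that takes in 1/DECAY of new
 * input per update. Starting from the unity factor 8192, an idle period
 * that measured half of what was expected (say 100 us of an expected
 * 200 us) gives
 *
 *	new_factor = 8192 * 7 / 8 + 1024 * 100 / 200
 *	           = 7168 + 512 = 7680
 *
 * so the next prediction for this bucket uses 7680 / 8192 = 93.75% of
 * the timer estimate, converging toward 50% if the pattern repeats.
 * Hypothetical numbers.
 */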

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);

	memset(data, 0, sizeof(struct menu_device));

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
	.owner =	THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

/**
 * exit_menu - exits the governor
 */
static void __exit exit_menu(void)
{
	cpuidle_unregister_governor(&menu_governor);
}

MODULE_LICENSE("GPL");
module_init(init_menu);
module_exit(exit_menu);