// SPDX-License-Identifier: GPL-2.0-only1/*2* menu.c - the menu idle governor3*4* Copyright (C) 2006-2007 Adam Belay <[email protected]>5* Copyright (C) 2009 Intel Corporation6* Author:7* Arjan van de Ven <[email protected]>8*/910#include <linux/kernel.h>11#include <linux/cpuidle.h>12#include <linux/time.h>13#include <linux/ktime.h>14#include <linux/hrtimer.h>15#include <linux/tick.h>16#include <linux/sched/stat.h>17#include <linux/math64.h>1819#include "gov.h"2021#define BUCKETS 622#define INTERVAL_SHIFT 323#define INTERVALS (1UL << INTERVAL_SHIFT)24#define RESOLUTION 102425#define DECAY 826#define MAX_INTERESTING (50000 * NSEC_PER_USEC)2728/*29* Concepts and ideas behind the menu governor30*31* For the menu governor, there are 2 decision factors for picking a C32* state:33* 1) Energy break even point34* 2) Latency tolerance (from pmqos infrastructure)35* These two factors are treated independently.36*37* Energy break even point38* -----------------------39* C state entry and exit have an energy cost, and a certain amount of time in40* the C state is required to actually break even on this cost. CPUIDLE41* provides us this duration in the "target_residency" field. So all that we42* need is a good prediction of how long we'll be idle. Like the traditional43* menu governor, we take the actual known "next timer event" time.44*45* Since there are other source of wakeups (interrupts for example) than46* the next timer event, this estimation is rather optimistic. To get a47* more realistic estimate, a correction factor is applied to the estimate,48* that is based on historic behavior. For example, if in the past the actual49* duration always was 50% of the next timer tick, the correction factor will50* be 0.5.51*52* menu uses a running average for this correction factor, but it uses a set of53* factors, not just a single factor. This stems from the realization that the54* ratio is dependent on the order of magnitude of the expected duration; if we55* expect 500 milliseconds of idle time the likelihood of getting an interrupt56* very early is much higher than if we expect 50 micro seconds of idle time.57* For this reason, menu keeps an array of 6 independent factors, that gets58* indexed based on the magnitude of the expected duration.59*60* Repeatable-interval-detector61* ----------------------------62* There are some cases where "next timer" is a completely unusable predictor:63* Those cases where the interval is fixed, for example due to hardware64* interrupt mitigation, but also due to fixed transfer rate devices like mice.65* For this, we use a different predictor: We track the duration of the last 866* intervals and use them to estimate the duration of the next one.67*/6869struct menu_device {70int needs_update;71int tick_wakeup;7273u64 next_timer_ns;74unsigned int bucket;75unsigned int correction_factor[BUCKETS];76unsigned int intervals[INTERVALS];77int interval_ptr;78};7980static inline int which_bucket(u64 duration_ns)81{82int bucket = 0;8384if (duration_ns < 10ULL * NSEC_PER_USEC)85return bucket;86if (duration_ns < 100ULL * NSEC_PER_USEC)87return bucket + 1;88if (duration_ns < 1000ULL * NSEC_PER_USEC)89return bucket + 2;90if (duration_ns < 10000ULL * NSEC_PER_USEC)91return bucket + 3;92if (duration_ns < 100000ULL * NSEC_PER_USEC)93return bucket + 4;94return bucket + 5;95}9697static DEFINE_PER_CPU(struct menu_device, menu_devices);9899static void menu_update_intervals(struct menu_device *data, unsigned int interval_us)100{101/* Update the repeating-pattern data. */102data->intervals[data->interval_ptr++] = interval_us;103if (data->interval_ptr >= INTERVALS)104data->interval_ptr = 0;105}106107static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);108109/*110* Try detecting repeating patterns by keeping track of the last 8111* intervals, and checking if the standard deviation of that set112* of points is below a threshold. If it is... then use the113* average of these 8 points as the estimated value.114*/115static unsigned int get_typical_interval(struct menu_device *data)116{117s64 value, min_thresh = -1, max_thresh = UINT_MAX;118unsigned int max, min, divisor;119u64 avg, variance, avg_sq;120int i;121122again:123/* Compute the average and variance of past intervals. */124max = 0;125min = UINT_MAX;126avg = 0;127variance = 0;128divisor = 0;129for (i = 0; i < INTERVALS; i++) {130value = data->intervals[i];131/*132* Discard the samples outside the interval between the min and133* max thresholds.134*/135if (value <= min_thresh || value >= max_thresh)136continue;137138divisor++;139140avg += value;141variance += value * value;142143if (value > max)144max = value;145146if (value < min)147min = value;148}149150if (!max)151return UINT_MAX;152153if (divisor == INTERVALS) {154avg >>= INTERVAL_SHIFT;155variance >>= INTERVAL_SHIFT;156} else {157do_div(avg, divisor);158do_div(variance, divisor);159}160161avg_sq = avg * avg;162variance -= avg_sq;163164/*165* The typical interval is obtained when standard deviation is166* small (stddev <= 20 us, variance <= 400 us^2) or standard167* deviation is small compared to the average interval (avg >168* 6*stddev, avg^2 > 36*variance). The average is smaller than169* UINT_MAX aka U32_MAX, so computing its square does not170* overflow a u64. We simply reject this candidate average if171* the standard deviation is greater than 715 s (which is172* rather unlikely).173*174* Use this result only if there is no timer to wake us up sooner.175*/176if (likely(variance <= U64_MAX/36)) {177if ((avg_sq > variance * 36 && divisor * 4 >= INTERVALS * 3) ||178variance <= 400)179return avg;180}181182/*183* If there are outliers, discard them by setting thresholds to exclude184* data points at a large enough distance from the average, then185* calculate the average and standard deviation again. Once we get186* down to the last 3/4 of our samples, stop excluding samples.187*188* This can deal with workloads that have long pauses interspersed189* with sporadic activity with a bunch of short pauses.190*/191if (divisor * 4 <= INTERVALS * 3) {192/*193* If there are sufficiently many data points still under194* consideration after the outliers have been eliminated,195* returning without a prediction would be a mistake because it196* is likely that the next interval will not exceed the current197* maximum, so return the latter in that case.198*/199if (divisor >= INTERVALS / 2)200return max;201202return UINT_MAX;203}204205/* Update the thresholds for the next round. */206if (avg - min > max - avg)207min_thresh = min;208else209max_thresh = max;210211goto again;212}213214/**215* menu_select - selects the next idle state to enter216* @drv: cpuidle driver containing state data217* @dev: the CPU218* @stop_tick: indication on whether or not to stop the tick219*/220static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,221bool *stop_tick)222{223struct menu_device *data = this_cpu_ptr(&menu_devices);224s64 latency_req = cpuidle_governor_latency_req(dev->cpu);225u64 predicted_ns;226ktime_t delta, delta_tick;227int i, idx;228229if (data->needs_update) {230menu_update(drv, dev);231data->needs_update = 0;232} else if (!dev->last_residency_ns) {233/*234* This happens when the driver rejects the previously selected235* idle state and returns an error, so update the recent236* intervals table to prevent invalid information from being237* used going forward.238*/239menu_update_intervals(data, UINT_MAX);240}241242/* Find the shortest expected idle interval. */243predicted_ns = get_typical_interval(data) * NSEC_PER_USEC;244if (predicted_ns > RESIDENCY_THRESHOLD_NS) {245unsigned int timer_us;246247/* Determine the time till the closest timer. */248delta = tick_nohz_get_sleep_length(&delta_tick);249if (unlikely(delta < 0)) {250delta = 0;251delta_tick = 0;252}253254data->next_timer_ns = delta;255data->bucket = which_bucket(data->next_timer_ns);256257/* Round up the result for half microseconds. */258timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 +259data->next_timer_ns *260data->correction_factor[data->bucket],261RESOLUTION * DECAY * NSEC_PER_USEC);262/* Use the lowest expected idle interval to pick the idle state. */263predicted_ns = min((u64)timer_us * NSEC_PER_USEC, predicted_ns);264} else {265/*266* Because the next timer event is not going to be determined267* in this case, assume that without the tick the closest timer268* will be in distant future and that the closest tick will occur269* after 1/2 of the tick period.270*/271data->next_timer_ns = KTIME_MAX;272delta_tick = TICK_NSEC / 2;273data->bucket = BUCKETS - 1;274}275276if (unlikely(drv->state_count <= 1 || latency_req == 0) ||277((data->next_timer_ns < drv->states[1].target_residency_ns ||278latency_req < drv->states[1].exit_latency_ns) &&279!dev->states_usage[0].disable)) {280/*281* In this case state[0] will be used no matter what, so return282* it right away and keep the tick running if state[0] is a283* polling one.284*/285*stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING);286return 0;287}288289/*290* If the tick is already stopped, the cost of possible short idle291* duration misprediction is much higher, because the CPU may be stuck292* in a shallow idle state for a long time as a result of it. In that293* case, say we might mispredict and use the known time till the closest294* timer event for the idle state selection.295*/296if (tick_nohz_tick_stopped() && predicted_ns < TICK_NSEC)297predicted_ns = data->next_timer_ns;298299/*300* Find the idle state with the lowest power while satisfying301* our constraints.302*/303idx = -1;304for (i = 0; i < drv->state_count; i++) {305struct cpuidle_state *s = &drv->states[i];306307if (dev->states_usage[i].disable)308continue;309310if (idx == -1)311idx = i; /* first enabled state */312313if (s->exit_latency_ns > latency_req)314break;315316if (s->target_residency_ns > predicted_ns) {317/*318* Use a physical idle state, not busy polling, unless319* a timer is going to trigger soon enough.320*/321if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&322s->target_residency_ns <= data->next_timer_ns) {323predicted_ns = s->target_residency_ns;324idx = i;325break;326}327if (predicted_ns < TICK_NSEC)328break;329330if (!tick_nohz_tick_stopped()) {331/*332* If the state selected so far is shallow,333* waking up early won't hurt, so retain the334* tick in that case and let the governor run335* again in the next iteration of the loop.336*/337predicted_ns = drv->states[idx].target_residency_ns;338break;339}340341/*342* If the state selected so far is shallow and this343* state's target residency matches the time till the344* closest timer event, select this one to avoid getting345* stuck in the shallow one for too long.346*/347if (drv->states[idx].target_residency_ns < TICK_NSEC &&348s->target_residency_ns <= delta_tick)349idx = i;350351return idx;352}353354idx = i;355}356357if (idx == -1)358idx = 0; /* No states enabled. Must use 0. */359360/*361* Don't stop the tick if the selected state is a polling one or if the362* expected idle duration is shorter than the tick period length.363*/364if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||365predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {366*stop_tick = false;367368if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick) {369/*370* The tick is not going to be stopped and the target371* residency of the state to be returned is not within372* the time until the next timer event including the373* tick, so try to correct that.374*/375for (i = idx - 1; i >= 0; i--) {376if (dev->states_usage[i].disable)377continue;378379idx = i;380if (drv->states[i].target_residency_ns <= delta_tick)381break;382}383}384}385386return idx;387}388389/**390* menu_reflect - records that data structures need update391* @dev: the CPU392* @index: the index of actual entered state393*394* NOTE: it's important to be fast here because this operation will add to395* the overall exit latency.396*/397static void menu_reflect(struct cpuidle_device *dev, int index)398{399struct menu_device *data = this_cpu_ptr(&menu_devices);400401dev->last_state_idx = index;402data->needs_update = 1;403data->tick_wakeup = tick_nohz_idle_got_tick();404}405406/**407* menu_update - attempts to guess what happened after entry408* @drv: cpuidle driver containing state data409* @dev: the CPU410*/411static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)412{413struct menu_device *data = this_cpu_ptr(&menu_devices);414int last_idx = dev->last_state_idx;415struct cpuidle_state *target = &drv->states[last_idx];416u64 measured_ns;417unsigned int new_factor;418419/*420* Try to figure out how much time passed between entry to low421* power state and occurrence of the wakeup event.422*423* If the entered idle state didn't support residency measurements,424* we use them anyway if they are short, and if long,425* truncate to the whole expected time.426*427* Any measured amount of time will include the exit latency.428* Since we are interested in when the wakeup begun, not when it429* was completed, we must subtract the exit latency. However, if430* the measured amount of time is less than the exit latency,431* assume the state was never reached and the exit latency is 0.432*/433434if (data->tick_wakeup && data->next_timer_ns > TICK_NSEC) {435/*436* The nohz code said that there wouldn't be any events within437* the tick boundary (if the tick was stopped), but the idle438* duration predictor had a differing opinion. Since the CPU439* was woken up by a tick (that wasn't stopped after all), the440* predictor was not quite right, so assume that the CPU could441* have been idle long (but not forever) to help the idle442* duration predictor do a better job next time.443*/444measured_ns = 9 * MAX_INTERESTING / 10;445} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&446dev->poll_time_limit) {447/*448* The CPU exited the "polling" state due to a time limit, so449* the idle duration prediction leading to the selection of that450* state was inaccurate. If a better prediction had been made,451* the CPU might have been woken up from idle by the next timer.452* Assume that to be the case.453*/454measured_ns = data->next_timer_ns;455} else {456/* measured value */457measured_ns = dev->last_residency_ns;458459/* Deduct exit latency */460if (measured_ns > 2 * target->exit_latency_ns)461measured_ns -= target->exit_latency_ns;462else463measured_ns /= 2;464}465466/* Make sure our coefficients do not exceed unity */467if (measured_ns > data->next_timer_ns)468measured_ns = data->next_timer_ns;469470/* Update our correction ratio */471new_factor = data->correction_factor[data->bucket];472new_factor -= new_factor / DECAY;473474if (data->next_timer_ns > 0 && measured_ns < MAX_INTERESTING)475new_factor += div64_u64(RESOLUTION * measured_ns,476data->next_timer_ns);477else478/*479* we were idle so long that we count it as a perfect480* prediction481*/482new_factor += RESOLUTION;483484/*485* We don't want 0 as factor; we always want at least486* a tiny bit of estimated time. Fortunately, due to rounding,487* new_factor will stay nonzero regardless of measured_us values488* and the compiler can eliminate this test as long as DECAY > 1.489*/490if (DECAY == 1 && unlikely(new_factor == 0))491new_factor = 1;492493data->correction_factor[data->bucket] = new_factor;494495menu_update_intervals(data, ktime_to_us(measured_ns));496}497498/**499* menu_enable_device - scans a CPU's states and does setup500* @drv: cpuidle driver501* @dev: the CPU502*/503static int menu_enable_device(struct cpuidle_driver *drv,504struct cpuidle_device *dev)505{506struct menu_device *data = &per_cpu(menu_devices, dev->cpu);507int i;508509memset(data, 0, sizeof(struct menu_device));510511/*512* if the correction factor is 0 (eg first time init or cpu hotplug513* etc), we actually want to start out with a unity factor.514*/515for(i = 0; i < BUCKETS; i++)516data->correction_factor[i] = RESOLUTION * DECAY;517518return 0;519}520521static struct cpuidle_governor menu_governor = {522.name = "menu",523.rating = 20,524.enable = menu_enable_device,525.select = menu_select,526.reflect = menu_reflect,527};528529/**530* init_menu - initializes the governor531*/532static int __init init_menu(void)533{534return cpuidle_register_governor(&menu_governor);535}536537postcore_initcall(init_menu);538539540