// SPDX-License-Identifier: GPL-2.0-only
/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <[email protected]>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>

#include "gov.h"

#define BUCKETS 6
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING (50000 * NSEC_PER_USEC)

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 2 decision factors for picking a C
 * state:
 *  1) Energy break even point
 *  2) Latency tolerance (from the PM QoS infrastructure)
 * These two factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we take the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * which is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, but it uses a set of
 * factors, not just a single factor. This stems from the realization that the
 * ratio is dependent on the order of magnitude of the expected duration; if we
 * expect 500 milliseconds of idle time the likelihood of getting an interrupt
 * very early is much higher than if we expect 50 microseconds of idle time.
 * For this reason, menu keeps an array of 6 independent factors that gets
 * indexed based on the magnitude of the expected duration.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices like mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and use them to estimate the duration of the next one.
 */

struct menu_device {
	int		needs_update;
	int		tick_wakeup;

	u64		next_timer_ns;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};

static inline int which_bucket(u64 duration_ns)
{
	int bucket = 0;

	if (duration_ns < 10ULL * NSEC_PER_USEC)
		return bucket;
	if (duration_ns < 100ULL * NSEC_PER_USEC)
		return bucket + 1;
	if (duration_ns < 1000ULL * NSEC_PER_USEC)
		return bucket + 2;
	if (duration_ns < 10000ULL * NSEC_PER_USEC)
		return bucket + 3;
	if (duration_ns < 100000ULL * NSEC_PER_USEC)
		return bucket + 4;
	return bucket + 5;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update_intervals(struct menu_device *data, unsigned int interval_us)
{
	/* Update the repeating-pattern data. */
	data->intervals[data->interval_ptr++] = interval_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}
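
/*
 * Worked example (illustrative numbers, not from the original source):
 * with RESOLUTION == 1024 and DECAY == 8, a correction factor of
 * RESOLUTION * DECAY == 8192 represents unity, i.e. the predicted idle
 * time equals the time till the next timer event.  An expected sleep of
 * 250 us falls into bucket 2 above (100 us <= 250 us < 1000 us), so only
 * the history of sleeps of that order of magnitude influences its
 * correction.
 */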

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static unsigned int get_typical_interval(struct menu_device *data)
{
	s64 value, min_thresh = -1, max_thresh = UINT_MAX;
	unsigned int max, min, divisor;
	u64 avg, variance, avg_sq;
	int i;

again:
	/* Compute the average and variance of past intervals. */
	max = 0;
	min = UINT_MAX;
	avg = 0;
	variance = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		value = data->intervals[i];
		/*
		 * Discard the samples outside the interval between the min and
		 * max thresholds.
		 */
		if (value <= min_thresh || value >= max_thresh)
			continue;

		divisor++;

		avg += value;
		variance += value * value;

		if (value > max)
			max = value;

		if (value < min)
			min = value;
	}

	if (!max)
		return UINT_MAX;

	if (divisor == INTERVALS) {
		avg >>= INTERVAL_SHIFT;
		variance >>= INTERVAL_SHIFT;
	} else {
		do_div(avg, divisor);
		do_div(variance, divisor);
	}

	avg_sq = avg * avg;
	variance -= avg_sq;

	/*
	 * The typical interval is obtained when the standard deviation is
	 * small (stddev <= 20 us, variance <= 400 us^2) or the standard
	 * deviation is small compared to the average interval (avg >
	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
	 * UINT_MAX aka U32_MAX, so computing its square does not
	 * overflow a u64. We simply reject this candidate average if
	 * the standard deviation is greater than 715 s (which is
	 * rather unlikely).
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(variance <= U64_MAX/36)) {
		if ((avg_sq > variance * 36 && divisor * 4 >= INTERVALS * 3) ||
		    variance <= 400)
			return avg;
	}

	/*
	 * If there are outliers, discard them by setting thresholds to exclude
	 * data points at a large enough distance from the average, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the last 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 *
	 * However, if the number of remaining samples is too small to exclude
	 * any more outliers, allow the deepest available idle state to be
	 * selected because there are systems where the time spent by CPUs in
	 * deep idle states is correlated to the maximum frequency the CPUs
	 * can get to. On those systems, shallow idle states should be avoided
	 * unless there is a clear indication that the given CPU is most likely
	 * going to be woken up shortly.
	 */
	if (divisor * 4 <= INTERVALS * 3)
		return UINT_MAX;

	/* Update the thresholds for the next round. */
	if (avg - min > max - avg)
		min_thresh = min;
	else
		max_thresh = max;

	goto again;
}
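
/*
 * Worked example (illustrative numbers): for the samples
 * { 100, 103, 98, 101, 99, 102, 100, 97 } us, the average is 100 us and
 * the variance works out to 3 us^2, well below the 400 us^2 (20 us
 * standard deviation) threshold, so 100 us is returned as the typical
 * interval.  A single 10000 us sample in that set would blow up the
 * variance instead; it would then be excluded via max_thresh on the
 * next pass, and the average of the remaining 7 samples used.
 */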

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
	u64 predicted_ns;
	ktime_t delta, delta_tick;
	int i, idx;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	} else if (!dev->last_residency_ns) {
		/*
		 * This happens when the driver rejects the previously selected
		 * idle state and returns an error, so update the recent
		 * intervals table to prevent invalid information from being
		 * used going forward.
		 */
		menu_update_intervals(data, UINT_MAX);
	}

	/* Find the shortest expected idle interval. */
	predicted_ns = get_typical_interval(data) * NSEC_PER_USEC;
	if (predicted_ns > RESIDENCY_THRESHOLD_NS || tick_nohz_tick_stopped()) {
		unsigned int timer_us;

		/* Determine the time till the closest timer. */
		delta = tick_nohz_get_sleep_length(&delta_tick);
		if (unlikely(delta < 0)) {
			delta = 0;
			delta_tick = 0;
		}

		data->next_timer_ns = delta;
		data->bucket = which_bucket(data->next_timer_ns);

		/* Round up the result for half microseconds. */
		timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 +
					data->next_timer_ns *
						data->correction_factor[data->bucket],
				   RESOLUTION * DECAY * NSEC_PER_USEC);
		/* Use the lowest expected idle interval to pick the idle state. */
		predicted_ns = min((u64)timer_us * NSEC_PER_USEC, predicted_ns);
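
		/*
		 * Worked example (illustrative numbers): with a correction
		 * factor of 4096 (half of the 8192 unity value) and the next
		 * timer event 1000 us away, the scaled estimate is
		 * (8192000 / 2 + 1000000 * 4096) / 8192000, which the integer
		 * division rounds to 500 us, i.e. the timer distance
		 * multiplied by the 0.5 correction.
		 */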

		/*
		 * If the tick is already stopped, the cost of possible short
		 * idle duration misprediction is much higher, because the CPU
		 * may be stuck in a shallow idle state for a long time as a
		 * result of it.  In that case, assume that the prediction may
		 * be overly optimistic and use the known time till the closest
		 * timer event for the idle state selection.
		 */
		if (tick_nohz_tick_stopped() && predicted_ns < TICK_NSEC)
			predicted_ns = data->next_timer_ns;
	} else {
		/*
		 * Because the next timer event is not going to be determined
		 * in this case, assume that without the tick the closest timer
		 * will be in the distant future and that the closest tick will
		 * occur after 1/2 of the tick period.
		 */
		data->next_timer_ns = KTIME_MAX;
		delta_tick = TICK_NSEC / 2;
		data->bucket = BUCKETS - 1;
	}

	if (drv->state_count <= 1 || latency_req == 0 ||
	    ((data->next_timer_ns < drv->states[1].target_residency_ns ||
	      latency_req < drv->states[1].exit_latency_ns) &&
	     !dev->states_usage[0].disable)) {
		/*
		 * In this case state[0] will be used no matter what, so return
		 * it right away and keep the tick running if state[0] is a
		 * polling one.
		 */
		*stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING);
		return 0;
	}

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	idx = -1;
	for (i = 0; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];

		if (dev->states_usage[i].disable)
			continue;

		if (idx == -1)
			idx = i; /* first enabled state */

		if (s->exit_latency_ns > latency_req)
			break;

		if (s->target_residency_ns <= predicted_ns) {
			idx = i;
			continue;
		}

		/*
		 * Use a physical idle state instead of busy polling so long as
		 * its target residency is below the residency threshold, its
		 * exit latency is not greater than the predicted idle duration,
		 * and the next timer doesn't expire soon.
		 */
		if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
		    s->target_residency_ns < RESIDENCY_THRESHOLD_NS &&
		    s->target_residency_ns <= data->next_timer_ns &&
		    s->exit_latency_ns <= predicted_ns) {
			predicted_ns = s->target_residency_ns;
			idx = i;
			break;
		}

		if (predicted_ns < TICK_NSEC)
			break;

		if (!tick_nohz_tick_stopped()) {
			/*
			 * If the state selected so far is shallow, waking up
			 * early won't hurt, so retain the tick in that case and
			 * let the governor run again in the next iteration of
			 * the idle loop.
			 */
			predicted_ns = drv->states[idx].target_residency_ns;
			break;
		}

		/*
		 * If the state selected so far is shallow and this state's
		 * target residency matches the time till the closest timer
		 * event, select this one to avoid getting stuck in the shallow
		 * one for too long.
		 */
		if (drv->states[idx].target_residency_ns < TICK_NSEC &&
		    s->target_residency_ns <= delta_tick)
			idx = i;

		return idx;
	}
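
	/*
	 * Worked example (hypothetical driver states): with a polling state
	 * (0 us exit latency / 0 us target residency), C1 (2 us / 20 us) and
	 * C6 (100 us / 600 us), a predicted idle duration of 150 us and a
	 * latency limit of 50 us would make the loop above select C1: its
	 * 20 us target residency is covered by the prediction, while C6 is
	 * rejected because its 100 us exit latency exceeds the latency limit.
	 */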

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	     predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
		*stop_tick = false;

		if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
				if (dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency_ns <= delta_tick)
					break;
			}
		}
	}

	return idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	dev->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = dev->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	u64 measured_ns;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use the measured value anyway if it is short, and truncate
	 * it to the whole expected time otherwise.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup began, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_ns > TICK_NSEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion. Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_ns = 9 * MAX_INTERESTING / 10;
	} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
		   dev->poll_time_limit) {
		/*
		 * The CPU exited the "polling" state due to a time limit, so
		 * the idle duration prediction leading to the selection of
		 * that state was inaccurate. If a better prediction had been
		 * made, the CPU might have been woken up from idle by the next
		 * timer. Assume that to be the case.
		 */
		measured_ns = data->next_timer_ns;
	} else {
		/* measured value */
		measured_ns = dev->last_residency_ns;

		/* Deduct exit latency */
		if (measured_ns > 2 * target->exit_latency_ns)
			measured_ns -= target->exit_latency_ns;
		else
			measured_ns /= 2;
	}

	/* Make sure our coefficients do not exceed unity */
	if (measured_ns > data->next_timer_ns)
		measured_ns = data->next_timer_ns;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_ns > 0 && measured_ns < MAX_INTERESTING)
		new_factor += div64_u64(RESOLUTION * measured_ns,
					data->next_timer_ns);
	else
		/*
		 * We were idle so long that we count it as a perfect
		 * prediction.
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_ns values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;
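
	/*
	 * Worked example (illustrative numbers): starting from the unity
	 * factor of 8192 (RESOLUTION * DECAY), a wakeup after half of the
	 * predicted time yields new_factor = 8192 - 8192 / 8 + 1024 / 2 =
	 * 7680.  If wakeups keep arriving at half the timer distance, the
	 * factor converges towards RESOLUTION * DECAY / 2 == 4096, i.e. a
	 * 0.5 correction.
	 */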

	menu_update_intervals(data, ktime_to_us(measured_ns));
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * If the correction factor is 0 (e.g. first time init or cpu
	 * hotplug etc.), we actually want to start out with a unity factor.
	 */
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);