Path: drivers/cpufreq/cpufreq_ondemand.c
/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <[email protected]>.
 *                    Jun Nakajima <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * dbs is used in this file as a short form for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL) this governor will not work.
 * All times here are in us.
 */
#define MIN_SAMPLING_RATE_RATIO			(2)

static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER			(1000)
#define MIN_LATENCY_MULTIPLIER			(100)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
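
/*
 * Worked example of the multipliers above (illustrative numbers, not from
 * any specific CPU): a driver reporting a transition latency of 10,000 ns
 * gives latency = 10 us after the conversion in cpufreq_governor_dbs().
 * The default sampling rate is then 10 us * LATENCY_MULTIPLIER =
 * 10,000 us, i.e. the load is re-evaluated every 10 ms, and
 * min_sampling_rate is raised to at least 10 us * MIN_LATENCY_MULTIPLIER
 * = 1,000 us.
 */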

static void do_dbs_timer(struct work_struct *work);
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_iowait;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	unsigned int rate_mult;
	int cpu;
	unsigned int sample_type:1;
	/*
	 * per-CPU mutex that serializes governor limit changes with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * while the user is changing the governor or its limits.
	 */
	struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * dbs_mutex protects dbs_enable in governor start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int ignore_nice;
	unsigned int sampling_down_factor;
	unsigned int powersave_bias;
	unsigned int io_is_busy;
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.ignore_nice = 0,
	.powersave_bias = 0,
};

static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);
	if (wall)
		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);

	return (cputime64_t)jiffies_to_usecs(idle_time);
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);

	return idle_time;
}

static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
{
	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

	if (iowait_time == -1ULL)
		return 0;

	return iowait_time;
}

/*
 * Find the right frequency to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in the per-CPU area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
				       relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
				       CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
				       CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}
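
/*
 * Worked example for the function above (illustrative table values): with
 * powersave_bias = 100 (units of 0.1%, so a 10% reduction) and
 * freq_req = 2,000,000 kHz, freq_reduc = 200,000 kHz and
 * freq_avg = 1,800,000 kHz. If the nearest table entries are
 * freq_lo = 1,600,000 kHz and freq_hi = 2,000,000 kHz, and
 * jiffies_total = 10, then jiffies_hi = (200,000 * 10 + 200,000) / 400,000
 * = 5, so the CPU spends 5 jiffies at freq_hi and 5 jiffies at freq_lo,
 * averaging out to the requested 1.8 GHz.
 */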

static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}

/************************** sysfs interface ************************/

static ssize_t show_sampling_rate_min(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", min_sampling_rate);
}

define_one_global_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
	return count;
}

static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.io_is_busy = !!input;
	return count;
}

static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
	    input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.up_threshold = input;
	return count;
}

static ssize_t store_sampling_down_factor(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
{
	unsigned int input, j;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	dbs_tuners_ins.sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->rate_mult = 1;
	}
	return count;
}

static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;

	}
	return count;
}

static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
				    const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	dbs_tuners_ins.powersave_bias = input;
	ondemand_powersave_bias_init();
	return count;
}

define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

/************************** sysfs end ************************/
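
/*
 * The attribute group above hangs off the global cpufreq kobject, which
 * typically appears as /sys/devices/system/cpu/cpufreq/ondemand/. A
 * usage sketch (paths may vary with kernel configuration):
 *
 *   # cat /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate_min
 *   # echo 95 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
 *   # echo 1 > /sys/devices/system/cpu/cpufreq/ondemand/io_is_busy
 *
 * Writes are validated by the store_* handlers above; for example,
 * up_threshold must lie between MIN_FREQUENCY_UP_THRESHOLD (11) and
 * MAX_FREQUENCY_UP_THRESHOLD (100).
 */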

static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	if (dbs_tuners_ins.powersave_bias)
		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	__cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int max_load_freq;

	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate, we check if the current idle time is less
	 * than 20% (default). If it is, we try to increase the frequency.
	 * Every sampling_rate, we also look for the lowest frequency which
	 * can sustain the load while keeping idle time over 30%. If such a
	 * frequency exists, we try to decrease to it.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of the current frequency.
	 */

	/* Get Absolute Load - in terms of freq */
	max_load_freq = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
		unsigned int idle_time, wall_time, iowait_time;
		unsigned int load, load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);

		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
				j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
				j_dbs_info->prev_cpu_iowait);
		j_dbs_info->prev_cpu_iowait = cur_iowait_time;

		if (dbs_tuners_ins.ignore_nice) {
			cputime64_t cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
					j_dbs_info->prev_cpu_nice);
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies on 32-bit systems
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		/*
		 * For the purpose of ondemand, waiting for disk IO is an
		 * indication that you're performance critical, and not that
		 * the system is actually idle. So subtract the iowait time
		 * from the cpu idle time.
		 */

		if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
			idle_time -= iowait_time;

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;

		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}
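
	/*
	 * Worked example (illustrative numbers): over a 10,000 us window
	 * with 1,500 us of idle time, load = 100 * 8,500 / 10,000 = 85.
	 * With freq_avg = 1,600,000 kHz, load_freq = 85 * 1,600,000 =
	 * 136,000,000, which exceeds up_threshold (80) * policy->cur
	 * (1,600,000) = 128,000,000, so the check below ramps the policy
	 * straight to policy->max.
	 */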

	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			this_dbs_info->rate_mult =
				dbs_tuners_ins.sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the lowest frequency that can support
	 * the current CPU usage without triggering the up policy. To be
	 * safe, we stay 10 points (down_differential) under the threshold.
	 */
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	     policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		/* No longer fully busy, reset rate_mult */
		this_dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
					CPUFREQ_RELATION_L);
		}
	}
}
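
/*
 * Worked example for the decrease path above (illustrative numbers): with
 * up_threshold = 80 and down_differential = 10, a decrease is considered
 * once max_load_freq < 70 * policy->cur. At policy->cur = 2,000,000 kHz
 * and load = 30 (so max_load_freq = 60,000,000), freq_next = 60,000,000 /
 * 70 = 857,142 kHz, which is then clamped to policy->min if needed and
 * rounded to a real table frequency by __cpufreq_driver_target() with
 * CPUFREQ_RELATION_L.
 */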

static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	int delay;

	mutex_lock(&dbs_info->timer_mutex);

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		} else {
			/* We want all CPUs to do sampling nearly on
			 * the same jiffy
			 */
			delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
				* dbs_info->rate_mult);

			if (num_online_cpus() > 1)
				delay -= jiffies % delay;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
		delay = dbs_info->freq_lo_jiffies;
	}
	schedule_delayed_work_on(cpu, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on the same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	cancel_delayed_work_sync(&dbs_info->work);
}
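
/*
 * Worked example of the jiffy alignment used above (assuming HZ = 100):
 * with sampling_rate = 100,000 us and rate_mult = 1, delay =
 * usecs_to_jiffies(100,000) = 10 jiffies. If jiffies % 10 == 3 when the
 * timer is armed, delay becomes 7, so the work fires on a multiple of 10
 * jiffies and all CPUs end up sampling on nearly the same jiffy.
 */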

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series of) CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		mutex_lock(&dbs_mutex);

		dbs_enable++;
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice) {
				j_dbs_info->prev_cpu_nice =
						kstat_cpu(j).cpustat.nice;
			}
		}
		this_dbs_info->cpu = cpu;
		this_dbs_info->rate_mult = 1;
		ondemand_powersave_bias_init_cpu(cpu);
		/*
		 * Start the timer/schedule work when this governor is used
		 * for the first time.
		 */
		if (dbs_enable == 1) {
			unsigned int latency;

			rc = sysfs_create_group(cpufreq_global_kobject,
						&dbs_attr_group);
			if (rc) {
				mutex_unlock(&dbs_mutex);
				return rc;
			}

			/* policy latency is in ns. Convert it to us first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;
			/* Bring kernel and HW constraints together */
			min_sampling_rate = max(min_sampling_rate,
					MIN_LATENCY_MULTIPLIER * latency);
			dbs_tuners_ins.sampling_rate =
				max(min_sampling_rate,
				    latency * LATENCY_MULTIPLIER);
			dbs_tuners_ins.io_is_busy = should_io_be_busy();
		}
		mutex_unlock(&dbs_mutex);

		mutex_init(&this_dbs_info->timer_mutex);
		dbs_timer_init(this_dbs_info);
		break;

	case CPUFREQ_GOV_STOP:
		dbs_timer_exit(this_dbs_info);

		mutex_lock(&dbs_mutex);
		mutex_destroy(&this_dbs_info->timer_mutex);
		dbs_enable--;
		mutex_unlock(&dbs_mutex);
		if (!dbs_enable)
			sysfs_remove_group(cpufreq_global_kobject,
					   &dbs_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&this_dbs_info->timer_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&this_dbs_info->timer_mutex);
		break;
	}
	return 0;
}

static int __init cpufreq_gov_dbs_init(void)
{
	cputime64_t wall;
	u64 idle_time;
	int cpu = get_cpu();

	idle_time = get_cpu_idle_time_us(cpu, &wall);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_tuners_ins.down_differential =
					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
		/*
		 * In the no_hz/micro accounting case we set the minimum
		 * frequency not depending on HZ, but fixed (very low). The
		 * deferrable timer might skip some samples if idle/sleeping
		 * as needed.
		 */
		min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		/* For correct statistics, we need 10 ticks for each measure */
		min_sampling_rate =
			MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
	}

	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}


MODULE_AUTHOR("Venkatesh Pallipadi <[email protected]>");
MODULE_AUTHOR("Alexey Starikovskiy <[email protected]>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);