Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/sh/kernel/idle.c
10817 views
1
/*
2
* The idle loop for all SuperH platforms.
3
*
4
* Copyright (C) 2002 - 2009 Paul Mundt
5
*
6
* This file is subject to the terms and conditions of the GNU General Public
7
* License. See the file "COPYING" in the main directory of this archive
8
* for more details.
9
*/
10
#include <linux/module.h>
11
#include <linux/init.h>
12
#include <linux/mm.h>
13
#include <linux/pm.h>
14
#include <linux/tick.h>
15
#include <linux/preempt.h>
16
#include <linux/thread_info.h>
17
#include <linux/irqflags.h>
18
#include <linux/smp.h>
19
#include <asm/pgalloc.h>
20
#include <asm/system.h>
21
#include <asm/atomic.h>
22
#include <asm/smp.h>
23
24
/*
 * Platform-overridable idle routine.  If still NULL at boot,
 * select_idle_routine() installs default_idle or poll_idle.
 */
void (*pm_idle)(void) = NULL;

/* Non-zero when the "nohlt" boot option has disabled sleeping in idle. */
static int hlt_counter;
27
28
static int __init nohlt_setup(char *__unused)
29
{
30
hlt_counter = 1;
31
return 1;
32
}
33
__setup("nohlt", nohlt_setup);
34
35
static int __init hlt_setup(char *__unused)
36
{
37
hlt_counter = 0;
38
return 1;
39
}
40
__setup("hlt", hlt_setup);
41
42
static inline int hlt_works(void)
43
{
44
return !hlt_counter;
45
}
46
47
/*
48
* On SMP it's slightly faster (but much more power-consuming!)
49
* to poll the ->work.need_resched flag instead of waiting for the
50
* cross-CPU IPI to arrive. Use this option with caution.
51
*/
52
/*
 * Busy-wait idle: spin on need_resched() with interrupts enabled.
 * Avoids the cross-CPU wakeup IPI latency at the cost of power.
 */
static void poll_idle(void)
{
	local_irq_enable();
	for (;;) {
		if (need_resched())
			break;
		cpu_relax();
	}
}
58
59
/*
 * Default idle: put the CPU to sleep until it is needed again.
 *
 * The ordering here is race-sensitive: TIF_POLLING_NRFLAG must be
 * cleared (with a full barrier) BEFORE the final need_resched() check,
 * so that a remote CPU setting need_resched after the clear knows it
 * must send a wakeup IPI rather than rely on us polling the flag.
 */
void default_idle(void)
{
	if (hlt_works()) {
		/* Stop advertising polling; remote CPUs must IPI us now. */
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();

		/*
		 * NOTE(review): set_bl_bit()/clear_bl_bit() bracket the
		 * sleep — presumably the SH SR.BL bit gating exception
		 * entry; confirm against asm/system.h.
		 */
		set_bl_bit();
		if (!need_resched()) {
			local_irq_enable();
			cpu_sleep();
		} else
			local_irq_enable();

		/* Back to polling mode for the outer cpu_idle() loop. */
		set_thread_flag(TIF_POLLING_NRFLAG);
		clear_bl_bit();
	} else
		poll_idle();
}
77
78
/*
79
* The idle thread. There's no useful work to be done, so just try to conserve
80
* power and have a low exit latency (ie sit in a loop waiting for somebody to
81
* say that they'd like to reschedule)
82
*/
83
void cpu_idle(void)
{
	unsigned int cpu = smp_processor_id();

	/* Advertise that this CPU polls need_resched while idle. */
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);

		while (!need_resched()) {
			check_pgt_cache();
			/*
			 * NOTE(review): read barrier — presumably orders
			 * the need_resched() read against the offline
			 * check below; confirm pairing.
			 */
			rmb();

			/* A hot-unplugged CPU never returns from here. */
			if (cpu_is_offline(cpu))
				play_dead();

			/* pm_idle() is entered with IRQs off ... */
			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			/*
			 * Sanity check to ensure that pm_idle() returns
			 * with IRQs enabled
			 */
			WARN_ON(irqs_disabled());
			start_critical_timings();
		}

		tick_nohz_restart_sched_tick();
		/*
		 * No-resched variant: we are about to call schedule()
		 * ourselves, so the preempt check would be redundant.
		 */
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
118
119
/*
 * Pick the boot-time idle implementation, honouring any routine a
 * platform has already installed in pm_idle.
 */
void __init select_idle_routine(void)
{
	/* A platform-provided idle routine always wins. */
	if (pm_idle)
		return;

	pm_idle = hlt_works() ? default_idle : poll_idle;
}
132
133
/*
 * No-op IPI handler; cpu_idle_wait() uses it purely to kick every CPU
 * out of its current pm_idle() invocation.
 */
static void do_nothing(void *unused)
{
}
136
137
void stop_this_cpu(void *unused)
138
{
139
local_irq_disable();
140
set_cpu_online(smp_processor_id(), false);
141
142
for (;;)
143
cpu_sleep();
144
}
145
146
/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
	/* Make the caller's pm_idle store visible before the IPIs below. */
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
161
162