Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/x86/power/cpu.c
10817 views
1
/*
2
* Suspend support specific for i386/x86-64.
3
*
4
* Distribute under GPLv2
5
*
6
* Copyright (c) 2007 Rafael J. Wysocki <[email protected]>
7
* Copyright (c) 2002 Pavel Machek <[email protected]>
8
* Copyright (c) 2001 Patrick Mochel <[email protected]>
9
*/
10
11
#include <linux/suspend.h>
12
#include <linux/smp.h>
13
14
#include <asm/pgtable.h>
15
#include <asm/proto.h>
16
#include <asm/mtrr.h>
17
#include <asm/page.h>
18
#include <asm/mce.h>
19
#include <asm/xcr.h>
20
#include <asm/suspend.h>
21
#include <asm/debugreg.h>
22
23
#ifdef CONFIG_X86_32
/*
 * Processor context captured at suspend time and consumed at resume.
 * On 32-bit the struct is file-local; the individual register variables
 * below carry the general-purpose register state separately (presumably
 * filled in by the low-level suspend/resume assembly -- verify against
 * the wakeup code before relying on this).
 */
static struct saved_context saved_context;

unsigned long saved_context_ebx;
unsigned long saved_context_esp, saved_context_ebp;
unsigned long saved_context_esi, saved_context_edi;
unsigned long saved_context_eflags;
#else
/* CONFIG_X86_64: non-static -- referenced from outside this file. */
struct saved_context saved_context;
#endif
34
35
/**
 * __save_processor_state - save CPU registers before creating a
 * hibernation image and before restoring the memory state from it
 * @ctxt - structure to store the registers contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (ie. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (ie. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function. In other words, if kernel A is hibernated and different
 * kernel B is used for loading the hibernation image into memory, the
 * kernel A's __save_processor_state() function must save all registers
 * needed by kernel A, so that it can operate correctly after the resume
 * regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	/*
	 * Quiesce the FPU; the matching kernel_fpu_end() is issued by
	 * do_fpu_end() on the restore path, not in this function.
	 */
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
#ifdef CONFIG_X86_32
	store_gdt(&ctxt->gdt);
	store_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	/*
	 * The address of the *_limit field is reinterpreted as a
	 * struct desc_ptr.  NOTE(review): this assumes the limit and base
	 * fields of saved_context are laid out contiguously like desc_ptr
	 * -- confirm against asm/suspend_64.h.
	 */
	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
#endif
	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);
	savesegment(ss, ctxt->ss);
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	/*
	 * On 64-bit the fs/gs bases (and the inactive kernel GS base) live
	 * in MSRs and are not covered by the selector saves above.
	 */
	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
#ifdef CONFIG_X86_32
	/* _safe variant: CR4 does not exist on pre-Pentium CPUs. */
	ctxt->cr4 = read_cr4_safe();
#else
/* CONFIG_X86_64 */
	ctxt->cr4 = read_cr4();
	ctxt->cr8 = read_cr8();
#endif
	/*
	 * MSR_IA32_MISC_ENABLE may be absent on some CPUs, hence the _safe
	 * read; the saved flag tells the restore path whether the value is
	 * valid to write back.
	 */
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
}
111
112
/* Needed by apm.c */
/*
 * Save the boot CPU's register state into the file-scope saved_context,
 * then capture the sched_clock state (ordering matters: the clock save
 * follows the register save here, mirroring restore_processor_state()).
 */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	save_sched_clock_state();
}
#ifdef CONFIG_X86_32
/* Exported only on 32-bit, for the APM driver. */
EXPORT_SYMBOL(save_processor_state);
#endif
121
122
/*
 * Counterpart of the kernel_fpu_begin() issued in
 * __save_processor_state(); called at the end of the restore path.
 */
static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}
129
130
/*
 * Re-establish the per-CPU descriptor state (TSS, TR, LDT and, on
 * 64-bit, the syscall MSRs) after the GDT/IDT have been reloaded by
 * __restore_processor_state().
 */
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	set_tss_desc(cpu, t);	/*
				 * This just modifies memory; should not be
				 * necessary. But... This is necessary, because
				 * 386 hardware has concept of busy TSS or some
				 * similar stupidity.
				 */

#ifdef CONFIG_X86_64
	/*
	 * Force the TSS descriptor type back to "available" (9): ltr
	 * below would fault on a descriptor still marked busy.
	 */
	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;

	syscall_init();				/* This sets MSR_*STAR and related */
#endif
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */
}
150
151
/**
 * __restore_processor_state - restore the contents of CPU registers saved
 * by __save_processor_state()
 * @ctxt - structure to load the registers contents from
 *
 * Restore ordering is significant: MISC_ENABLE first, then control
 * registers (EFER/CR4 before CR3 so paging-mode bits are consistent),
 * then descriptor tables, segments and MSRs, and finally the TSS/LDT
 * fixup, FPU and MTRR state.
 */
static void __restore_processor_state(struct saved_context *ctxt)
{
	/* Only write MISC_ENABLE back if the save-side read succeeded. */
	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	/* A zero saved cr4 means the CPU had none -- skip the write. */
	if (ctxt->cr4)
		write_cr4(ctxt->cr4);
#else
/* CONFIG X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
#ifdef CONFIG_X86_32
	load_gdt(&ctxt->gdt);
	load_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
#endif

	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	/*
	 * %gs is restored via load_gs_index() rather than a plain mov;
	 * note the MSR-held gs bases are written explicitly right below.
	 */
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
#endif

	/*
	 * restore XCR0 for xsave capable cpu's.
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);

	fix_processor_context();

	/* Pairs with the kernel_fpu_begin() in __save_processor_state(). */
	do_fpu_end();
	mtrr_bp_restore();
}
228
229
/* Needed by apm.c */
/*
 * Restore the register state captured by save_processor_state(), then
 * bring the sched_clock state back -- the inverse, in the same relative
 * order, of the save path.
 */
void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
	restore_sched_clock_state();
}
#ifdef CONFIG_X86_32
/* Exported only on 32-bit, for the APM driver. */
EXPORT_SYMBOL(restore_processor_state);
#endif
238
239