GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/xen/xen-asm.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>
#include <asm/unwind_hints.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/objtool.h>
#include <../entry/calling.h>

.pushsection .noinstr.text, "ax"
/*
 * PV hypercall interface to the hypervisor.
 *
 * Called via inline asm(), so better preserve %rcx and %r11.
 *
 * Input:
 *      %eax: hypercall number
 *      %rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall
 * Output: %rax
 */
SYM_FUNC_START(xen_hypercall_pv)
        ANNOTATE_NOENDBR
        push %rcx
        push %r11
        UNWIND_HINT_SAVE
        syscall
        UNWIND_HINT_RESTORE
        pop %r11
        pop %rcx
        RET
SYM_FUNC_END(xen_hypercall_pv)
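
/*
 * Illustration only (not code from the kernel sources): given the
 * contract above, a C caller would look roughly like the sketch below;
 * "ret", "nr" and "arg1".."arg4" are placeholder names. %rcx and %r11
 * need not be listed as clobbers because the function preserves them.
 *
 *      register unsigned long _a4 asm("r10") = arg4;
 *      asm volatile("call xen_hypercall_pv"
 *                   : "=a" (ret)
 *                   : "a" (nr), "D" (arg1), "S" (arg2),
 *                     "d" (arg3), "r" (_a4)
 *                   : "memory");
 */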

/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
        ENDBR
        movb $1, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
        RET
SYM_FUNC_END(xen_irq_disable_direct)
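
/*
 * Background (a sketch, not code from this file): xen_vcpu_info is the
 * per-CPU struct vcpu_info and XEN_vcpu_info_mask is the asm-offsets
 * constant for its evtchn_upcall_mask byte, so the store above is
 * roughly equivalent to:
 *
 *      this_cpu_ptr(&xen_vcpu_info)->evtchn_upcall_mask = 1;
 */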

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
SYM_FUNC_START(check_events)
        FRAME_BEGIN
        push %rax
        push %rcx
        push %rdx
        push %rsi
        push %rdi
        push %r8
        push %r9
        push %r10
        push %r11
        call xen_force_evtchn_callback
        pop %r11
        pop %r10
        pop %r9
        pop %r8
        pop %rdi
        pop %rsi
        pop %rdx
        pop %rcx
        pop %rax
        FRAME_END
        RET
SYM_FUNC_END(check_events)
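
/*
 * Note that the registers saved and restored above are exactly the
 * x86-64 C-ABI caller-clobbered set (%rax, %rcx, %rdx, %rsi, %rdi and
 * %r8-%r11), so the call to the C function xen_force_evtchn_callback
 * cannot corrupt anything, and callers such as xen_irq_enable_direct
 * below need not preserve registers themselves.
 */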

/*
 * Enable events. This clears the event mask and then checks the
 * pending event status. If there are pending events, enter the
 * hypervisor to have them delivered.
 */
SYM_FUNC_START(xen_irq_enable_direct)
        ENDBR
        FRAME_BEGIN
        /* Unmask events */
        movb $0, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)

        /*
         * Preemption here doesn't matter, because preemption itself
         * will deal with any pending interrupts. The pending check may
         * end up running on the wrong CPU, but that doesn't hurt.
         */

        /* Test for pending */
        testb $0xff, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_pending)
        jz 1f

        call check_events
1:
        FRAME_END
        RET
SYM_FUNC_END(xen_irq_enable_direct)
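
/*
 * Rough C equivalent of xen_irq_enable_direct (a sketch; "vcpu" stands
 * for the per-CPU vcpu_info, field names as in the Xen ABI):
 *
 *      vcpu->evtchn_upcall_mask = 0;
 *      if (vcpu->evtchn_upcall_pending)
 *              check_events();
 */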

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value. We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined. We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
SYM_FUNC_START(xen_save_fl_direct)
        ENDBR
        testb $0xff, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
        setz %ah
        addb %ah, %ah
        RET
SYM_FUNC_END(xen_save_fl_direct)
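
/*
 * How the three instructions above build X86_EFLAGS_IF (bit 9, i.e.
 * 0x200): %ah aliases bits 8..15 of %eax, so
 *
 *      mask == 0 (events enabled):  setz %ah -> %ah = 1 (bit 8)
 *                                   addb %ah,%ah -> %ah = 2, bit 9 set
 *      mask != 0 (events disabled): setz %ah -> %ah = 0
 *                                   addb %ah,%ah -> %ah = 0, bit 9 clear
 */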

SYM_FUNC_START(xen_read_cr2)
        ENDBR
        FRAME_BEGIN
        _ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
        _ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
        FRAME_END
        RET
SYM_FUNC_END(xen_read_cr2);

SYM_FUNC_START(xen_read_cr2_direct)
        ENDBR
        FRAME_BEGIN
        _ASM_MOV PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_arch_cr2), %_ASM_AX
        FRAME_END
        RET
SYM_FUNC_END(xen_read_cr2_direct);
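
/*
 * Background: a PV guest cannot read %cr2 itself; on a page fault Xen
 * stores the faulting address in the vcpu_info arch.cr2 field, which
 * is what the XEN_vcpu_info_arch_cr2 offset above refers to.
 */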
.popsection

.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
        UNWIND_HINT_ENTRY
        ENDBR
        pop %rcx
        pop %r11
        jmp \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm
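
/*
 * Xen pushes %rcx and %r11 on top of the iret-like exception frame
 * (see the syscall comment further down), so each stub strips them
 * before jumping to the native handler. For reference,
 * "xen_pv_trap asm_exc_int3" expands to:
 *
 *      SYM_CODE_START(xen_asm_exc_int3)
 *              UNWIND_HINT_ENTRY
 *              ENDBR
 *              pop %rcx
 *              pop %r11
 *              jmp asm_exc_int3
 *      SYM_CODE_END(xen_asm_exc_int3)
 *      _ASM_NOKPROBE(xen_asm_exc_int3)
 */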

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_xenpv_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_CET
xen_pv_trap asm_exc_control_protection
#endif
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_xenpv_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap asm_int80_emulation
#endif
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback

__INIT
SYM_CODE_START(xen_early_idt_handler_array)
        i = 0
        .rept NUM_EXCEPTION_VECTORS
        UNWIND_HINT_UNDEFINED
        ENDBR
        pop %rcx
        pop %r11
        jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
        i = i + 1
        .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
        .endr
SYM_CODE_END(xen_early_idt_handler_array)
__FINIT
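
/*
 * Layout note: inside the .rept above, each stub is padded with 0xcc
 * (int3) up to the next XEN_EARLY_IDT_HANDLER_SIZE boundary, so the
 * stub for vector n sits at
 * xen_early_idt_handler_array + n * XEN_EARLY_IDT_HANDLER_SIZE.
 */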

/*
 * Xen64 iret frame:
 *
 *      ss
 *      rsp
 *      rflags
 *      cs
 *      rip             <-- standard iret frame
 *
 *      flags           <-- xen_iret must push from here on
 *
 *      rcx
 *      r11
 * rsp->rax
 */
.macro xen_hypercall_iret
        pushq $0        /* Flags */
        push %rcx
        push %r11
        push %rax
        mov $__HYPERVISOR_iret, %eax
        syscall         /* Do the IRET. */
        ud2             /* The SYSCALL should never return. */
.endm

SYM_CODE_START(xen_iret)
        UNWIND_HINT_UNDEFINED
        ANNOTATE_NOENDBR
        xen_hypercall_iret
SYM_CODE_END(xen_iret)
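
/*
 * Background: a PV guest doesn't return to user mode with a native
 * IRET; it asks Xen to do it via the __HYPERVISOR_iret hypercall,
 * which atomically restores the frame laid out above together with
 * the event mask.
 */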

/*
 * Xen PV doesn't use a trampoline stack: PER_CPU_VAR(cpu_tss_rw + TSS_sp0)
 * is also the kernel stack. Reusing swapgs_restore_regs_and_return_to_usermode()
 * under Xen PV would move %rsp up to the top of the kernel stack and leave
 * the IRET frame below %rsp, where it could be corrupted if an #NMI
 * interrupts. Besides, having swapgs_restore_regs_and_return_to_usermode()
 * push the IRET frame at the same address would be pointless.
 */
SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
        UNWIND_HINT_REGS
        POP_REGS

        /* stackleak_erase() can work safely on the kernel stack. */
        STACKLEAK_ERASE_NOCLOBBER

        addq $8, %rsp   /* skip regs->orig_ax */
        jmp xen_iret
SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)
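
/*
 * POP_REGS comes from <../entry/calling.h> and restores the pt_regs
 * register block; the addq then skips the orig_ax slot so that %rsp
 * points at the five-word hardware frame (rip, cs, rflags, rsp, ss)
 * that xen_iret consumes.
 */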

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *      ss
 *      rsp
 *      rflags
 *      cs
 *      rip
 *      r11
 * rsp->rcx
 */

/* Normal 64-bit system call target */
SYM_CODE_START(xen_entry_SYSCALL_64)
        UNWIND_HINT_ENTRY
        ENDBR
        popq %rcx
        popq %r11

        /*
         * Neither Xen nor the kernel really knows what the old SS and
         * CS were. The kernel expects __USER_DS and __USER_CS, so
         * report those values even though Xen will guess its own values.
         */
        movq $__USER_DS, 4*8(%rsp)
        movq $__USER_CS, 1*8(%rsp)

        jmp entry_SYSCALL_64_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_64)
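
/*
 * Offset note: after the two pops above, the stack holds rip (0*8),
 * cs (1*8), rflags (2*8), rsp (3*8) and ss (4*8), so the two movq
 * instructions overwrite the saved CS and SS slots. The same applies
 * to the compat entry points below.
 */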

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_CODE_START(xen_entry_SYSCALL_compat)
        UNWIND_HINT_ENTRY
        ENDBR
        popq %rcx
        popq %r11

        /*
         * Neither Xen nor the kernel really knows what the old SS and
         * CS were. The kernel expects __USER_DS and __USER32_CS, so
         * report those values even though Xen will guess its own values.
         */
        movq $__USER_DS, 4*8(%rsp)
        movq $__USER32_CS, 1*8(%rsp)

        jmp entry_SYSCALL_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_compat)

/* 32-bit compat sysenter target */
SYM_CODE_START(xen_entry_SYSENTER_compat)
        UNWIND_HINT_ENTRY
        ENDBR
        /*
         * NB: Xen is polite and clears TF from EFLAGS for us. This means
         * that we don't need to guard against single step exceptions here.
         */
        popq %rcx
        popq %r11

        /*
         * Neither Xen nor the kernel really knows what the old SS and
         * CS were. The kernel expects __USER_DS and __USER32_CS, so
         * report those values even though Xen will guess its own values.
         */
        movq $__USER_DS, 4*8(%rsp)
        movq $__USER32_CS, 1*8(%rsp)

        jmp entry_SYSENTER_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSENTER_compat)

#else /* !CONFIG_IA32_EMULATION */

SYM_CODE_START(xen_entry_SYSCALL_compat)
SYM_CODE_START(xen_entry_SYSENTER_compat)
        UNWIND_HINT_ENTRY
        ENDBR
        lea 16(%rsp), %rsp      /* strip %rcx, %r11 */
        mov $-ENOSYS, %rax
        xen_hypercall_iret
SYM_CODE_END(xen_entry_SYSENTER_compat)
SYM_CODE_END(xen_entry_SYSCALL_compat)
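
/*
 * Without IA32 emulation the two compat entry points simply fail the
 * call: the lea discards the %rcx/%r11 pair Xen pushed, -ENOSYS is
 * placed in %rax as the return value, and the iret hypercall hands
 * the return back to Xen.
 */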

#endif /* CONFIG_IA32_EMULATION */