/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>
#include <asm/unwind_hints.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/objtool.h>
#include <../entry/calling.h>

.pushsection .noinstr.text, "ax"
/*
 * PV hypercall interface to the hypervisor.
 *
 * Called via inline asm(), so better preserve %rcx and %r11.
 *
 * Input:
 *	%eax: hypercall number
 *	%rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall
 * Output: %rax
 */
SYM_FUNC_START(xen_hypercall_pv)
	ANNOTATE_NOENDBR
	/*
	 * SYSCALL architecturally clobbers %rcx (saved RIP) and %r11
	 * (saved RFLAGS); save them so the inline-asm caller sees them
	 * preserved, as promised above.
	 */
	push %rcx
	push %r11
	UNWIND_HINT_SAVE
	syscall
	UNWIND_HINT_RESTORE
	pop %r11
	pop %rcx
	RET
SYM_FUNC_END(xen_hypercall_pv)

/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	ENDBR
	movb $1, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
	RET
SYM_FUNC_END(xen_irq_disable_direct)

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 *
 * The pushes below cover the full caller-saved integer register set
 * of the 64-bit SysV ABI, so the C callee can clobber freely while
 * our own caller sees every register preserved.
 */
SYM_FUNC_START(check_events)
	FRAME_BEGIN
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	FRAME_END
	RET
SYM_FUNC_END(check_events)

/*
 * Enable events. This clears the event mask and tests the pending
 * event status with one and operation. If there are pending events,
 * then enter the hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	ENDBR
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)

	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts. The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_pending)
	jz 1f

	call check_events
1:
	FRAME_END
	RET
SYM_FUNC_END(xen_irq_enable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value. We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined. We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
SYM_FUNC_START(xen_save_fl_direct)
	ENDBR
	testb $0xff, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
	/*
	 * mask == 0 (events enabled) => ZF set => %ah = 1; doubling %ah
	 * moves that 1 from bit 8 to bit 9 of %eax, i.e. X86_EFLAGS_IF.
	 */
	setz %ah
	addb %ah, %ah
	RET
SYM_FUNC_END(xen_save_fl_direct)

/* Read the faulting address from the per-vcpu info via the xen_vcpu pointer. */
SYM_FUNC_START(xen_read_cr2)
	ENDBR
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	RET
SYM_FUNC_END(xen_read_cr2);

/* Same as above, but straight from the percpu vcpu_info copy. */
SYM_FUNC_START(xen_read_cr2_direct)
	ENDBR
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_arch_cr2), %_ASM_AX
	FRAME_END
	RET
SYM_FUNC_END(xen_read_cr2_direct);
.popsection

/*
 * Entry stub for a Xen PV trap: discard the %rcx/%r11 pair that Xen
 * leaves on the stack (see the callback frame layout further below),
 * then chain to the native handler \name.
 */
.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
	UNWIND_HINT_ENTRY
	ENDBR
	pop %rcx
	pop %r11
	jmp \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_xenpv_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_CET
xen_pv_trap asm_exc_control_protection
#endif
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_xenpv_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap asm_int80_emulation
#endif
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback

__INIT
/*
 * Early-boot counterpart of the trap stubs above: one fixed-size entry
 * per exception vector, each stripping %rcx/%r11 and chaining to the
 * corresponding native early IDT handler.
 */
SYM_CODE_START(xen_early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	UNWIND_HINT_UNDEFINED
	ENDBR
	pop %rcx
	pop %r11
	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
	i = i + 1
	/* Pad each entry to XEN_EARLY_IDT_HANDLER_SIZE with int3 (0xcc). */
	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(xen_early_idt_handler_array)
__FINIT

/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags		<-- xen_iret must push from here on
 *
 *	rcx
 *	r11
 * rsp->rax
 */
.macro xen_hypercall_iret
	pushq $0			/* Flags */
	push %rcx
	push %r11
	push %rax
	mov $__HYPERVISOR_iret, %eax
	syscall				/* Do the IRET. */
	ud2				/* The SYSCALL should never return. */
.endm

SYM_CODE_START(xen_iret)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR
	xen_hypercall_iret
SYM_CODE_END(xen_iret)

/*
 * XEN pv doesn't use trampoline stack, PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
 * also the kernel stack. Reusing swapgs_restore_regs_and_return_to_usermode()
 * in XEN pv would cause %rsp to move up to the top of the kernel stack and
 * leave the IRET frame below %rsp, which is dangerous to be corrupted if #NMI
 * interrupts. And swapgs_restore_regs_and_return_to_usermode() pushing the IRET
 * frame at the same address is useless.
 */
SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
	UNWIND_HINT_REGS
	POP_REGS

	/* stackleak_erase() can work safely on the kernel stack. */
	STACKLEAK_ERASE_NOCLOBBER

	addq $8, %rsp			/* skip regs->orig_ax */
	jmp xen_iret
SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 */

/* Normal 64-bit system call target */
SYM_CODE_START(xen_entry_SYSCALL_64)
	UNWIND_HINT_ENTRY
	ENDBR
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were. The kernel expects __USER_DS and __USER_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER_CS, 1*8(%rsp)

	jmp entry_SYSCALL_64_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_64)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_CODE_START(xen_entry_SYSCALL_compat)
	UNWIND_HINT_ENTRY
	ENDBR
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were. The kernel expects __USER_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSCALL_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_compat)

/* 32-bit compat sysenter target */
SYM_CODE_START(xen_entry_SYSENTER_compat)
	UNWIND_HINT_ENTRY
	ENDBR
	/*
	 * NB: Xen is polite and clears TF from EFLAGS for us. This means
	 * that we don't need to guard against single step exceptions here.
	 */
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were. The kernel expects __USER_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSENTER_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSENTER_compat)

#else /* !CONFIG_IA32_EMULATION */

/*
 * Without IA32 emulation the compat entry points simply fail the call:
 * drop the %rcx/%r11 pair and return -ENOSYS to userspace via iret.
 */
SYM_CODE_START(xen_entry_SYSCALL_compat)
SYM_CODE_START(xen_entry_SYSENTER_compat)
	UNWIND_HINT_ENTRY
	ENDBR
	lea 16(%rsp), %rsp		/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	xen_hypercall_iret
SYM_CODE_END(xen_entry_SYSENTER_compat)
SYM_CODE_END(xen_entry_SYSCALL_compat)

#endif /* CONFIG_IA32_EMULATION */