/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (i.e., vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>

#include "xen-asm.h"

/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with a single 'and' operation.  If there are pending
 * events, then enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)


/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, we check for unmasked pending events and
 * enter the hypervisor to get them delivered if so.
 */
ENTRY(xen_restore_fl_direct)
#ifdef CONFIG_X86_64
	testw $X86_EFLAGS_IF, %di
#else
	testb $X86_EFLAGS_IF>>8, %ah
#endif
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* check for unmasked and pending */
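	/*
	 * The single 16-bit compare below relies on
	 * evtchn_upcall_pending and evtchn_upcall_mask being adjacent
	 * bytes in vcpu_info: the word at the pending offset is
	 * (mask << 8) | pending, so it equals 0x0001 only when an
	 * event is pending and delivery is not masked.
	 */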
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)


/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
check_events:
#ifdef CONFIG_X86_32
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
#else
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
#endif
	ret
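
/*
 * For reference, a rough C model of the save/restore logic implemented
 * above.  This is an illustrative sketch only, not the kernel's C
 * variants of these ops: the trimmed vcpu_info and the model_* names
 * exist just for this comment, while xen_force_evtchn_callback is the
 * real helper called above.
 *
 *	// vcpu_info trimmed to the two fields used in this file
 *	struct vcpu_info {
 *		unsigned char evtchn_upcall_pending;
 *		unsigned char evtchn_upcall_mask;
 *	};
 *
 *	#define X86_EFLAGS_IF 0x200UL
 *
 *	extern void xen_force_evtchn_callback(void);
 *
 *	// What xen_save_fl_direct computes: report the event mask with
 *	// its sense flipped into the X86_EFLAGS_IF bit of the result.
 *	static unsigned long model_save_fl(const struct vcpu_info *v)
 *	{
 *		return v->evtchn_upcall_mask ? 0 : X86_EFLAGS_IF;
 *	}
 *
 *	// What xen_restore_fl_direct does: set the mask from IF (again
 *	// with the sense flipped), then force an event check if events
 *	// are now unmasked and one is pending.
 *	static void model_restore_fl(struct vcpu_info *v, unsigned long flags)
 *	{
 *		v->evtchn_upcall_mask = !(flags & X86_EFLAGS_IF);
 *		if (!v->evtchn_upcall_mask && v->evtchn_upcall_pending)
 *			xen_force_evtchn_callback();
 *	}
 */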