Path: blob/master/arch/powerpc/kvm/book3s_interrupts.S
10817 views
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <[email protected]>
 */

/*
 * KVM Book3S PR guest entry/exit code living in module (highmem) memory.
 * Word size differences between Book3S_64 and Book3S_32 are papered over
 * with the PPC_LL/PPC_STL macros from ppc_asm.h and the macros below.
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>

#if defined(CONFIG_PPC_BOOK3S_64)

#define ULONG_SIZE 8
#define FUNC(name) GLUE(.,name)

/* 64-bit: the shadow vcpu is embedded in the PACA (reached via r13) */
#define GET_SHADOW_VCPU(reg) \
	addi	reg, r13, PACA_KVM_SVCPU

/*
 * Clear MSR[EE] (IBM bit 48): rotate EE into the MSB, mask it off with
 * rldicl, rotate back (48 + 16 = 64), then write the MSR.
 */
#define DISABLE_INTERRUPTS \
	mfmsr   r0;		\
	rldicl  r0,r0,48,1;	\
	rotldi  r0,r0,16;	\
	mtmsrd  r0,1;		\

#elif defined(CONFIG_PPC_BOOK3S_32)

#define ULONG_SIZE 4
#define FUNC(name) name

/* 32-bit: the shadow vcpu pointer hangs off the thread struct (via r2) */
#define GET_SHADOW_VCPU(reg) \
	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(r2)

/* Clear MSR[EE] (bit 16 in 32-bit big-endian numbering: mask 17..15) */
#define DISABLE_INTERRUPTS \
	mfmsr   r0;		\
	rlwinm  r0,r0,0,17,15;	\
	mtmsr   r0;		\

#endif /* CONFIG_PPC_BOOK3S_XX */


/* Byte offset of guest GPR n inside the vcpu struct */
#define VCPU_GPR(n)		(VCPU_GPRS + (n * ULONG_SIZE))

/* Load the guest's non-volatile GPRs (r14-r31) from the vcpu struct */
#define VCPU_LOAD_NVGPRS(vcpu) \
	PPC_LL	r14, VCPU_GPR(r14)(vcpu); \
	PPC_LL	r15, VCPU_GPR(r15)(vcpu); \
	PPC_LL	r16, VCPU_GPR(r16)(vcpu); \
	PPC_LL	r17, VCPU_GPR(r17)(vcpu); \
	PPC_LL	r18, VCPU_GPR(r18)(vcpu); \
	PPC_LL	r19, VCPU_GPR(r19)(vcpu); \
	PPC_LL	r20, VCPU_GPR(r20)(vcpu); \
	PPC_LL	r21, VCPU_GPR(r21)(vcpu); \
	PPC_LL	r22, VCPU_GPR(r22)(vcpu); \
	PPC_LL	r23, VCPU_GPR(r23)(vcpu); \
	PPC_LL	r24, VCPU_GPR(r24)(vcpu); \
	PPC_LL	r25, VCPU_GPR(r25)(vcpu); \
	PPC_LL	r26, VCPU_GPR(r26)(vcpu); \
	PPC_LL	r27, VCPU_GPR(r27)(vcpu); \
	PPC_LL	r28, VCPU_GPR(r28)(vcpu); \
	PPC_LL	r29, VCPU_GPR(r29)(vcpu); \
	PPC_LL	r30, VCPU_GPR(r30)(vcpu); \
	PPC_LL	r31, VCPU_GPR(r31)(vcpu); \

/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (highmem)     *
 *                                                                           *
 ****************************************************************************/

/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_entry)

kvm_start_entry:
	/* Write correct stack frame */
	mflr	r0
	PPC_STL	r0,PPC_LR_STKOFF(r1)

	/* Save host state to the stack */
	PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)

	/* Save r3 (kvm_run) and r4 (vcpu) */
	SAVE_2GPRS(3, r1)

	/* Save non-volatile registers (r14 - r31) */
	SAVE_NVGPRS(r1)

	/* Save LR */
	PPC_STL	r0, _LINK(r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)

	GET_SHADOW_VCPU(r5)

	/* Save R1/R2 in the PACA so the exit path can find the host stack */
	PPC_STL	r1, SVCPU_HOST_R1(r5)
	PPC_STL	r2, SVCPU_HOST_R2(r5)

	/* XXX swap in/out on load? */
	/* Tell the shadow vcpu where to come back to on guest exit */
	PPC_LL	r3, VCPU_HIGHMEM_HANDLER(r4)
	PPC_STL	r3, SVCPU_VMHANDLER(r5)

kvm_start_lightweight:

	PPC_LL	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */

	/* No interrupts from here until we're in the guest */
	DISABLE_INTERRUPTS

#ifdef CONFIG_PPC_BOOK3S_64
	/* Some guests may need to have dcbz set to 32 byte length.
	 *
	 * Usually we ensure that by patching the guest's instructions
	 * to trap on dcbz and emulate it in the hypervisor.
	 *
	 * If we can, we should tell the CPU to use 32 byte dcbz though,
	 * because that's a lot faster.
	 */

	PPC_LL	r3, VCPU_HFLAGS(r4)
	rldicl.	r3, r3, 0, 63		/* CR = ((r3 & 1) == 0) */
	beq	no_dcbz32_on

	mfspr	r3,SPRN_HID5
	ori	r3, r3, 0x80		/* XXX HID5_dcbz32 = 0x80 */
	mtspr	SPRN_HID5,r3

no_dcbz32_on:

#endif /* CONFIG_PPC_BOOK3S_64 */

	/* CTR = real-mode helper; the trampoline branches through it */
	PPC_LL	r6, VCPU_RMCALL(r4)
	mtctr	r6

	/* Enter the guest via the lowmem trampoline, with IR/DR off */
	PPC_LL	r3, VCPU_TRAMPOLINE_ENTER(r4)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))

	/* Jump to segment patching handler and into our guest */
	bctr

/*
 * This is the handler in module memory. It gets jumped at from the
 * lowmem trampoline code, so it's basically the guest exit code.
 *
 */

.global kvmppc_handler_highmem
kvmppc_handler_highmem:

	/*
	 * Register usage at this point:
	 *
	 * R1       = host R1
	 * R2       = host R2
	 * R12      = exit handler id
	 * R13      = PACA
	 * SVCPU.*  = guest *
	 *
	 */

	/* R7 = vcpu (reload from the frame saved by SAVE_2GPRS above) */
	PPC_LL	r7, GPR4(r1)

#ifdef CONFIG_PPC_BOOK3S_64

	/* Undo the 32-byte dcbz setting if we enabled it on entry */
	PPC_LL	r5, VCPU_HFLAGS(r7)
	rldicl.	r5, r5, 0, 63		/* CR = ((r5 & 1) == 0) */
	beq	no_dcbz32_off

	li	r4, 0
	mfspr	r5,SPRN_HID5
	rldimi	r5,r4,6,56		/* insert 0 over the dcbz32 bit set above */
	mtspr	SPRN_HID5,r5

no_dcbz32_off:

#endif /* CONFIG_PPC_BOOK3S_64 */

	/* Save the guest's non-volatile GPRs back into the vcpu */
	PPC_STL	r14, VCPU_GPR(r14)(r7)
	PPC_STL	r15, VCPU_GPR(r15)(r7)
	PPC_STL	r16, VCPU_GPR(r16)(r7)
	PPC_STL	r17, VCPU_GPR(r17)(r7)
	PPC_STL	r18, VCPU_GPR(r18)(r7)
	PPC_STL	r19, VCPU_GPR(r19)(r7)
	PPC_STL	r20, VCPU_GPR(r20)(r7)
	PPC_STL	r21, VCPU_GPR(r21)(r7)
	PPC_STL	r22, VCPU_GPR(r22)(r7)
	PPC_STL	r23, VCPU_GPR(r23)(r7)
	PPC_STL	r24, VCPU_GPR(r24)(r7)
	PPC_STL	r25, VCPU_GPR(r25)(r7)
	PPC_STL	r26, VCPU_GPR(r26)(r7)
	PPC_STL	r27, VCPU_GPR(r27)(r7)
	PPC_STL	r28, VCPU_GPR(r28)(r7)
	PPC_STL	r29, VCPU_GPR(r29)(r7)
	PPC_STL	r30, VCPU_GPR(r30)(r7)
	PPC_STL	r31, VCPU_GPR(r31)(r7)

	/* Restore host msr -> SRR1 */
	PPC_LL	r6, VCPU_HOST_MSR(r7)

	/*
	 * For some interrupts, we need to call the real Linux
	 * handler, so it can do work for us. This has to happen
	 * as if the interrupt arrived from the kernel though,
	 * so let's fake it here where most state is restored.
	 *
	 * Call Linux for hardware interrupts/decrementer
	 * r3 = address of interrupt handler (exit reason)
	 */

	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	call_linux_handler
	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
	beq	call_linux_handler
	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
	beq	call_linux_handler

	/* Back to EE=1 */
	mtmsr	r6
	sync
	b	kvm_return_point

call_linux_handler:

	/*
	 * If we land here we need to jump back to the handler we
	 * came from.
	 *
	 * We have a page that we can access from real mode, so let's
	 * jump back to that and use it as a trampoline to get back into the
	 * interrupt handler!
	 *
	 * R3 still contains the exit code,
	 * R5 VCPU_HOST_RETIP and
	 * R6 VCPU_HOST_MSR
	 */

	/* Restore host IP -> SRR0 */
	PPC_LL	r5, VCPU_HOST_RETIP(r7)

	/* XXX Better move to a safe function?
	 *     What if we get an HTAB flush in between mtsrr0 and mtsrr1? */

	/* NOTE(review): exit handler id parked in LR — presumably consumed
	 * by the lowmem trampoline; confirm against the trampoline code. */
	mtlr	r12

	/* SRR0/SRR1 = lowmem trampoline with IR/DR off, then switch via RFI */
	PPC_LL	r4, VCPU_TRAMPOLINE_LOWMEM(r7)
	mtsrr0	r4
	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
	mtsrr1	r3

	RFI

.global kvm_return_point
kvm_return_point:

	/* Jump back to lightweight entry if we're supposed to */
	/* go back into the guest */

	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
	mr	r5, r12

	/* Restore r3 (kvm_run) and r4 (vcpu) */
	REST_2GPRS(3, r1)
	bl	FUNC(kvmppc_handle_exit)

	/* If RESUME_GUEST, get back in the loop */
	cmpwi	r3, RESUME_GUEST
	beq	kvm_loop_lightweight

	cmpwi	r3, RESUME_GUEST_NV
	beq	kvm_loop_heavyweight

kvm_exit_loop:
	/* Not resuming the guest: unwind the frame and return to the caller */

	PPC_LL	r4, _LINK(r1)
	mtlr	r4

	/* Restore non-volatile host registers (r14 - r31) */
	REST_NVGPRS(r1)

	addi	r1, r1, SWITCH_FRAME_SIZE
	blr

kvm_loop_heavyweight:
	/* RESUME_GUEST_NV: reload the guest NV GPRs before re-entering */

	PPC_LL	r4, _LINK(r1)
	PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)

	/* Load vcpu and cpu_run */
	REST_2GPRS(3, r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight

kvm_loop_lightweight:
	/* RESUME_GUEST: NV GPRs still hold guest state, just re-enter */

	/* We'll need the vcpu pointer */
	REST_GPR(4, r1)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight