/* arch/powerpc/kvm/book3s_64_slb.S */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <[email protected]>
 */

/*
 * Byte offsets of the ESID and VSID doublewords of shadow-SLB save-area
 * entry 'num'.  Each save-area slot is 0x10 bytes: ESID at +0, VSID at +8.
 * (SLBSHADOW_SAVEAREA is an asm-offsets constant defined elsewhere.)
 */
#define SHADOW_SLB_ESID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10))
#define SHADOW_SLB_VSID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)

/*
 * Clear the valid bit of bolted shadow-SLB entry 'num'.
 * In:  r12 = pointer to the slb_shadow area.
 * Clobbers r0, r9, cr0.
 */
#define UNBOLT_SLB_ENTRY(num) \
	ld	r9, SHADOW_SLB_ESID(num)(r12); \
	/* Invalid? Skip. */; \
	/* rotate SLB_ESID_V (esid bit 36) down into bit 63 and test it */; \
	rldicl. r0, r9, 37, 63; \
	beq	slb_entry_skip_ ## num; \
	/* flip the valid bit off and write the ESID back */; \
	xoris	r9, r9, SLB_ESID_V@h; \
	std	r9, SHADOW_SLB_ESID(num)(r12); \
slb_entry_skip_ ## num:

/*
 * Re-validate bolted shadow-SLB entry 'num' and reinstall it in the SLB.
 * In:  r11 = pointer to the slb_shadow area.
 * Empty (all-zero ESID) slots are skipped.  Clobbers r9, r10, cr0.
 */
#define REBOLT_SLB_ENTRY(num) \
	ld	r10, SHADOW_SLB_ESID(num)(r11); \
	cmpdi	r10, 0; \
	beq	slb_exit_skip_ ## num; \
	/* set the valid bit again, load the entry into the SLB ... */; \
	oris	r10, r10, SLB_ESID_V@h; \
	ld	r9, SHADOW_SLB_VSID(num)(r11); \
	slbmte	r9, r10; \
	/* ... and record the re-validated ESID in the shadow */; \
	std	r10, SHADOW_SLB_ESID(num)(r11); \
slb_exit_skip_ ## num:

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_GUEST_SEGMENTS

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = host R2
	 * R3 = shadow vcpu
	 * all other volatile GPRS = free
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */

	/* Remove LPAR shadow entries */

#if SLB_NUM_BOLTED == 3

	ld	r12, PACA_SLBSHADOWPTR(r13)

	/* Save off the first entry so we can slbie it later */
	ld	r10, SHADOW_SLB_ESID(0)(r12)
	ld	r11, SHADOW_SLB_VSID(0)(r12)

	/* Remove bolted entries */
	UNBOLT_SLB_ENTRY(0)
	UNBOLT_SLB_ENTRY(1)
	UNBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

	/* Flush SLB */

	slbia

	/* slbia leaves entry 0 intact, so slbie the saved entry 0 by hand.
	 * slbie needs the ESID plus the class bit of the matching VSID. */

	/* r10 = esid & ESID_MASK */
	rldicr	r10, r10, 0, 35
	/* r12 = CLASS_BIT(VSID); r10 |= r12 */
	rldic	r12, r11, 56 - 36, 36
	or	r10, r10, r12
	slbie	r10

	isync

	/* Fill SLB with our shadow */

	lbz	r12, SVCPU_SLB_MAX(r3)
	mulli	r12, r12, 16
	addi	r12, r12, SVCPU_SLB
	add	r12, r12, r3
	/* r12 = end of the shadow vcpu's SLB array (16 bytes per entry) */

	/* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
	li	r11, SVCPU_SLB
	add	r11, r11, r3

slb_loop_enter:

	ld	r10, 0(r11)

	/* skip entries whose valid bit (SLB_ESID_V, esid bit 36) is clear */
	rldicl. r0, r10, 37, 63
	beq	slb_loop_enter_skip

	ld	r9, 8(r11)
	slbmte	r9, r10

slb_loop_enter_skip:
	addi	r11, r11, 16
	cmpd	cr0, r11, r12
	blt	slb_loop_enter

slb_do_enter:

.endm

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_HOST_SEGMENTS

	/* Register usage at this point:
	 *
	 * R1         = host R1
	 * R2         = host R2
	 * R12        = exit handler id
	 * R13        = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
	 * SVCPU.*    = guest *
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 *
	 */

	/* Restore bolted entries from the shadow and fix it along the way */

	/* We don't store anything in entry 0, so we don't need to take care of it */
	slbia
	isync

#if SLB_NUM_BOLTED == 3

	ld	r11, PACA_SLBSHADOWPTR(r13)

	REBOLT_SLB_ENTRY(0)
	REBOLT_SLB_ENTRY(1)
	REBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

slb_do_exit:

.endm