/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <[email protected]>, IBM
 *
 * Based on earlier C version:
 *  Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <[email protected]>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>

/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate_realmode)
	/* r3 = faulting address */

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,28		/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the proto-VSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.  */

	/* Check if hitting the linear mapping or some other kernel space
	 */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
_GLOBAL(slb_miss_kernel_load_linear)
	li	r11,0
BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

1:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Check virtual memmap region. To be patched at kernel boot */
	cmpldi	cr0,r9,0xf
	bne	1f
_GLOBAL(slb_miss_kernel_load_vmemmap)
	li	r11,0
	b	6f
1:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	/* vmalloc mapping gets the encoding from the PACA as the mapping
	 * can be demoted from 64K -> 4K dynamically on some machines
	 */
	clrldi	r11,r10,48
	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
	bgt	5f
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	6f
5:
	/* IO mapping */
_GLOBAL(slb_miss_kernel_load_io)
	li	r11,0
6:
BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

0:	/* user address: proto-VSID = context << 15 | ESID. First check
	 * if the address is within the boundaries of the user region
	 */
	srdi.	r9,r10,USER_ESID_BITS
	bne-	8f			/* invalid ea bits set */


	/* when using slices, we extract the psize off the slice bitmaps
	 * and then we need to get the sllp encoding off the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value so we avoid the array. We cannot
	 * really do dynamic patching unfortunately as processes might flip
	 * between 4k and 64k standard page size
	 */
#ifdef CONFIG_PPC_MM_SLICES
	cmpldi	r10,16

	/* Get the slice index * 4 in r11 and matching slice size mask in r9 */
	ld	r9,PACALOWSLICESPSIZE(r13)
	sldi	r11,r10,2
	blt	5f
	ld	r9,PACAHIGHSLICEPSIZE(r13)
	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT - 2)
	andi.	r11,r11,0x3c

5:	/* Extract the psize and multiply to get an array offset */
	srd	r9,r9,r11
	andi.	r9,r9,0xf
	mulli	r9,r9,MMUPSIZEDEFSIZE

	/* Now get to the array and obtain the sllp
	 */
	ld	r11,PACATOC(r13)
	ld	r11,mmu_psize_defs@got(r11)
	add	r11,r11,r9
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */

	ld	r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
	cmpldi	r10,0x1000
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	rldimi	r10,r9,USER_ESID_BITS,0
BEGIN_FTR_SECTION
	bge	slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load

8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load

#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables. This is not currently used.
 */
_GLOBAL(slb_allocate_user)
	/* r3 = faulting address */
	srdi	r10,r3,28		/* get esid */

	crset	4*cr7+lt		/* set "user" flag for later */

	/* check if we fit in the range covered by the pagetables */
	srdi.	r9,r3,PGTABLE_EADDR_SIZE
	crnot	4*cr0+eq,4*cr0+eq
	beqlr

	/* now we need to get to the page tables in order to get the page
	 * size encoding from the PMD. In the future, we'll be able to deal
	 * with 1T segments too by getting the encoding from the PGD instead
	 */
	ld	r9,PACAPGDIR(r13)
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,8,25,28
	ldx	r9,r9,r11		/* get pgd_t */
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,3,17,28
	ldx	r9,r9,r11		/* get pmd_t */
	cmpldi	cr0,r9,0
	beqlr

	/* build vsid flags */
	andi.	r11,r9,SLB_VSID_LLP
	ori	r11,r11,SLB_VSID_USER

	/* get context to calculate proto-VSID */
	ld	r9,PACACONTEXTID(r13)
	rldimi	r10,r9,USER_ESID_BITS,0

	/* fall through to slb_finish_load */

#endif /* __DISABLED__ */


/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
slb_finish_load:
	ASM_VSID_SCRAMBLE(r10,r9,256M)
	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */

	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/*
	 * On iSeries, the "bolted" stack segment can be cast out on
	 * shared processor switch so we need to check for a miss on
	 * it and restore it to the right slot.
	 */
	ld	r9,PACAKSAVE(r13)
	clrrdi	r9,r9,28
	clrrdi	r3,r3,28
	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
	cmpld	r9,r3
	beq	3f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

7:	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* This gets soft patched on boot. */
_GLOBAL(slb_compare_rr_to_size)
	cmpldi	r10,0

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)

3:
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r10 = r3 | SLB_ESID_V */

	/* r3 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

	/* Update the slb cache */
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,1		/* r11 = offset * sizeof(u16) */
	rldicl	r10,r10,36,28		/* get low 16 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u16 *)paca + offset */
	sth	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:					/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr

/*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 * We assume legacy iSeries will never have 1T segments.
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
 */
slb_finish_load_1T:
	srdi	r10,r10,40-28		/* get 1T ESID */
	ASM_VSID_SCRAMBLE(r10,r9,1T)
	rldimi	r11,r10,SLB_VSID_SHIFT_1T,16	/* combine VSID and flags */
	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */

	/* r3 = EA, r11 = VSID data */
	clrrdi	r3,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
	b	7b
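
/*
 * For reference, a C-level sketch of the SLB insert that slb_finish_load and
 * slb_finish_load_1T perform once the (scrambled) VSID and flags are in hand.
 * This is only an illustration, not part of the build: slb_insert() is a
 * made-up name; SLB_VSID_SHIFT and SLB_ESID_V are the constants used by the
 * code above, and ESID_MASK is assumed to be the matching ESID mask from the
 * same hash-MMU headers.
 *
 *	static inline void slb_insert(unsigned long ea, unsigned long vsid,
 *				      unsigned long flags, unsigned long entry)
 *	{
 *		// RS: VSID in the upper bits, protection/LLP flags below
 *		unsigned long vsid_data = (vsid << SLB_VSID_SHIFT) | flags;
 *		// RB: ESID of the faulting EA, valid bit, and the slot index
 *		unsigned long esid_data = (ea & ESID_MASK) | SLB_ESID_V | entry;
 *
 *		asm volatile("slbmte %0,%1" : : "r" (vsid_data), "r" (esid_data)
 *			     : "memory");
 *	}
 *
 * The slot index comes from the round-robin counter in paca->stab_rr and
 * wraps back to SLB_NUM_BOLTED so the bolted entries are never overwritten;
 * user ESIDs are additionally recorded in paca->slb_cache so they can be
 * invalidated cheaply on context switch.
 */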