/* tlb-miss.S: TLB miss handlers
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spr-regs.h>

	.section	.text..tlbmiss
	.balign		4

	.globl		__entry_insn_mmu_miss
__entry_insn_mmu_miss:
	break
	nop

	.globl		__entry_insn_mmu_exception
__entry_insn_mmu_exception:
	break
	nop

	.globl		__entry_data_mmu_miss
__entry_data_mmu_miss:
	break
	nop

	.globl		__entry_data_mmu_exception
__entry_data_mmu_exception:
	break
	nop

###############################################################################
#
# handle a lookup failure of one sort or another in a kernel TLB handler
# On entry:
#   GR29  - faulting address
#   SCR2  - saved CCR
#
###############################################################################
	.type		__tlb_kernel_fault,@function
__tlb_kernel_fault:
	# see if we're supposed to re-enable single-step mode upon return
	sethi.p		%hi(__break_tlb_miss_return_break),gr30
	setlo		%lo(__break_tlb_miss_return_break),gr30
	movsg		pcsr,gr31

	subcc		gr31,gr30,gr0,icc0
	beq		icc0,#0,__tlb_kernel_fault_sstep

	movsg		scr2,gr30
	movgs		gr30,ccr
	movgs		gr29,scr2			/* save EAR0 value */
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi.p		@(gr29,#0),gr29			/* restore GR29 */

	bra		__entry_kernel_handle_mmu_fault

	# we've got to re-enable single-stepping
__tlb_kernel_fault_sstep:
	sethi.p		%hi(__break_tlb_miss_real_return_info),gr30
	setlo		%lo(__break_tlb_miss_real_return_info),gr30
	lddi		@(gr30,0),gr30
	movgs		gr30,pcsr
	movgs		gr31,psr

	movsg		scr2,gr30
	movgs		gr30,ccr
	movgs		gr29,scr2			/* save EAR0 value */
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi.p		@(gr29,#0),gr29			/* restore GR29 */
	bra		__entry_kernel_handle_mmu_fault_sstep

	.size		__tlb_kernel_fault, .-__tlb_kernel_fault

###############################################################################
#
# handle a lookup failure of one sort or another in a user TLB handler
# On entry:
#   GR28  - faulting address
#   SCR2  - saved CCR
#
###############################################################################
	.type		__tlb_user_fault,@function
__tlb_user_fault:
	# see if we're supposed to re-enable single-step mode upon return
	sethi.p		%hi(__break_tlb_miss_return_break),gr30
	setlo		%lo(__break_tlb_miss_return_break),gr30
	movsg		pcsr,gr31
	subcc		gr31,gr30,gr0,icc0
	beq		icc0,#0,__tlb_user_fault_sstep

	movsg		scr2,gr30
	movgs		gr30,ccr
	bra		__entry_uspace_handle_mmu_fault

	# we've got to re-enable single-stepping
__tlb_user_fault_sstep:
	sethi.p		%hi(__break_tlb_miss_real_return_info),gr30
	setlo		%lo(__break_tlb_miss_real_return_info),gr30
	lddi		@(gr30,0),gr30
	movgs		gr30,pcsr
	movgs		gr31,psr
	movsg		scr2,gr30
	movgs		gr30,ccr
	bra		__entry_uspace_handle_mmu_fault_sstep

	.size		__tlb_user_fault, .-__tlb_user_fault
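
###############################################################################
#
# The four miss handlers below all perform the same two-level walk; the
# following is a rough C-like sketch of the common fast path (illustrative
# only: "pgd", "ptd" and "pte" are descriptive names, not taken from the
# real page table headers, and PTEs are taken to be 4 bytes wide as implied
# by the 0x3ffc index mask):
#
#	if ((ear0 ^ scrN) >> 26)		/* outside cached PGE coverage */
#		goto PTD_miss;			/* walk the PGD, remap DAMR4/5 */
#	pte = ptd[(ear0 >> 14) & 0xfff];	/* EAR0[25:14]: 4096 PTEs */
#	if (!(pte & _PAGE_PRESENT))
#		goto fault;
#	ptd[(ear0 >> 14) & 0xfff] = pte | _PAGE_ACCESSED;
#	/* punt the old xAMR1 contents to the TLB, then load xAMR1 with the
#	 * new PTE and return */
#
###############################################################################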

###############################################################################
#
# Kernel instruction TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
	.globl		__entry_kernel_insn_tlb_miss
	.type		__entry_kernel_insn_tlb_miss,@function
__entry_kernel_insn_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__itlb_k_PTD_miss

__itlb_k_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR4 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using IAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	# - need to check DAMR1 lest we cause a multiple-DAT-hit exception
	# - IAMPR1 has no WP bit, and we mustn't lose WP information
	movsg		iampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__itlb_k_nopunt		/* punt not required */

	movsg		iamlr1,gr31
	movgs		gr31,tplr			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#4,#0			/* delete matches from TLB, IAMR1, DAMR1 */

	movsg		dampr1,gr31
	ori		gr31,#xAMPRx_V,gr31		/* entry was invalidated by tlbpr #4 */
	movgs		gr31,tppr
	movsg		iamlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_kernel_fault

__itlb_k_nopunt:

	# assemble the new TLB entry
	and		gr29,gr31,gr29
	movsg		cxnr,gr31
	or		gr29,gr31,gr29
	movgs		gr29,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr29,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi		@(gr29,#0),gr29
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_k_PTD_miss:
	srli		gr29,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__itlb_k_bigpage
	movgs		gr30,dampr4
	movgs		gr31,scr0

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__itlb_k_PTD_mapped

__itlb_k_bigpage:
	break
	nop

	.size		__entry_kernel_insn_tlb_miss, .-__entry_kernel_insn_tlb_miss
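
###############################################################################
#
# A note on the punt sequence above (repeated with DAMR1/DAMLR1 in the data
# handler below): xAMR1 serves as a one-entry TLB extension, so before it is
# reloaded, its current occupant, if valid, must be migrated into the TLB
# proper. Roughly, in pseudocode (operand meanings are as described by the
# comments in this file, not a formal ISA reference):
#
#	if (xampr1 & xAMPRx_V) {
#		tplr = xamlr1;			/* address + context number */
#		tlbpr #4;			/* purge matches from TLB, IAMR1, DAMR1 */
#		tppr = dampr1 | xAMPRx_V;	/* DAMPR1 keeps the WP bit; revalidate */
#		tplr = xamlr1;
#		tlbpr #2;			/* write {TPLR,TPPR} into the TLB */
#		if (tpxr & TPXR_E)		/* TLB write error */
#			goto fault;
#	}
#
# The userspace handlers further below skip the tlbpr #4 purge step.
#
###############################################################################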

###############################################################################
#
# Kernel data TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
	.globl		__entry_kernel_data_tlb_miss
	.type		__entry_kernel_data_tlb_miss,@function
__entry_kernel_data_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_k_PTD_miss

__dtlb_k_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR5 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr5,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	# - need to check IAMR1 lest we cause a multiple-DAT-hit exception
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__dtlb_k_nopunt		/* punt not required */

	movsg		damlr1,gr31
	movgs		gr31,tplr			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#4,#0			/* delete matches from TLB, IAMR1, DAMR1 */

	movsg		dampr1,gr31
	ori		gr31,#xAMPRx_V,gr31		/* entry was invalidated by tlbpr #4 */
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_kernel_fault

__dtlb_k_nopunt:

	# assemble the new TLB entry
	and		gr29,gr31,gr29
	movsg		cxnr,gr31
	or		gr29,gr31,gr29
	movgs		gr29,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr29,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi		@(gr29,#0),gr29
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_k_PTD_miss:
	srli		gr29,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__dtlb_k_bigpage
	movgs		gr30,dampr5
	movgs		gr31,scr1

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__dtlb_k_PTD_mapped

__dtlb_k_bigpage:
	break
	nop

	.size		__entry_kernel_data_tlb_miss, .-__entry_kernel_data_tlb_miss
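
###############################################################################
#
# A note on the PTD-miss paths: the PGD walk in __itlb_k_PTD_miss and
# __dtlb_k_PTD_miss (and in the userspace handlers below) amounts to the
# following C-like sketch (illustrative only; "pge" is a descriptive name,
# and PGEs are taken to be 256 bytes apart as implied by the slli #8):
#
#	pge = *(u32 *)(damlr3 + ((ear0 >> 26) << 8));	/* one of 64 PGEs */
#	if (!(pge & _PAGE_PRESENT))
#		goto fault;
#	if (pge & xAMPRx_SS)		/* outsized page: not handled (break) */
#		goto bigpage;
#	damprN = pge | xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C | xAMPRx_V;
#	scrN = ear0 & (0x3f << 26);	/* record the 64MB coverage base */
#
###############################################################################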

###############################################################################
#
# Userspace instruction TLB miss handler (with PGE prediction)
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
	.globl		__entry_user_insn_tlb_miss
	.type		__entry_user_insn_tlb_miss,@function
__entry_user_insn_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__itlb_u_PTD_miss

__itlb_u_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR4 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using IAMR1/DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__itlb_u_nopunt		/* punt not required */

	movsg		dampr1,gr31
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_user_fault

__itlb_u_nopunt:

	# assemble the new TLB entry
	and		gr28,gr31,gr28
	movsg		cxnr,gr31
	or		gr28,gr31,gr28
	movgs		gr28,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr28,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_u_PTD_miss:
	srli		gr28,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__itlb_u_bigpage
	movgs		gr30,dampr4
	movgs		gr31,scr0

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__itlb_u_PTD_mapped

__itlb_u_bigpage:
	break
	nop

	.size		__entry_user_insn_tlb_miss, .-__entry_user_insn_tlb_miss
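
###############################################################################
#
# Note: the userspace handlers (the one above and the data handler below)
# differ from the kernel ones mainly in that the faulting address arrives
# in GR28 rather than GR29, lookup failures divert to __tlb_user_fault
# rather than __tlb_kernel_fault, and no __kernel_current_task reload is
# needed before the rett.
#
###############################################################################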

###############################################################################
#
# Userspace data TLB miss handler
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
	.globl		__entry_user_data_tlb_miss
	.type		__entry_user_data_tlb_miss,@function
__entry_user_data_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_u_PTD_miss

__dtlb_u_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR5 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr5,gr30

__dtlb_u_using_iPTD:
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__dtlb_u_nopunt		/* punt not required */

	movsg		dampr1,gr31
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_user_fault

__dtlb_u_nopunt:

	# assemble the new TLB entry
	and		gr28,gr31,gr28
	movsg		cxnr,gr31
	or		gr28,gr31,gr28
	movgs		gr28,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr28,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */
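
	# Note: __dtlb_u_using_iPTD above is also entered from the PTD-miss
	# path below when the insn-PGE-cache (SCR0/DAMLR4) already covers the
	# faulting address; the data-side walk then borrows the page table
	# mapped by the insn handler instead of remapping DAMR5.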

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - first of all, check the insn PGE cache - we may well get a hit there
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_u_PTD_miss:
	movsg		scr0,gr31			/* consult the insn-PGE-cache key */
	xor		gr28,gr31,gr31
	srlicc		gr31,#26,gr0,icc0
	srli		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_u_iPGE_miss

	# what we're looking for is covered by the insn-PGE-cache
	setlos		0x3ffc,gr30
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	bra		__dtlb_u_using_iPTD

__dtlb_u_iPGE_miss:
	srli		gr28,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__dtlb_u_bigpage
	movgs		gr30,dampr5
	movgs		gr31,scr1

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__dtlb_u_PTD_mapped

__dtlb_u_bigpage:
	break
	nop

	.size		__entry_user_data_tlb_miss, .-__entry_user_data_tlb_miss