arch/microblaze/kernel/hw_exception_handler.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Exception handling for Microblaze
 *
 * Rewritten interrupt handling
 *
 * Copyright (C) 2008-2009 Michal Simek <[email protected]>
 * Copyright (C) 2008-2009 PetaLogix
 *
 * uClinux customisation (C) 2005 John Williams
 *
 * MMU code derived from arch/ppc/kernel/head_4xx.S:
 *	Copyright (C) 1995-1996 Gary Thomas <[email protected]>
 *		Initial PowerPC version.
 *	Copyright (C) 1996 Cort Dougan <[email protected]>
 *		Rewritten for PReP
 *	Copyright (C) 1996 Paul Mackerras <[email protected]>
 *		Low-level exception handlers, MMU support, and rewrite.
 *	Copyright (C) 1997 Dan Malek <[email protected]>
 *		PowerPC 8xx modifications.
 *	Copyright (C) 1998-1999 TiVo, Inc.
 *		PowerPC 403GCX modifications.
 *	Copyright (C) 1999 Grant Erickson <[email protected]>
 *		PowerPC 403GCX/405GP modifications.
 *	Copyright 2000 MontaVista Software Inc.
 *		PPC405 modifications
 *	PowerPC 403GCX/405GP modifications.
 *	Author: MontaVista Software, Inc.
 *		[email protected] or [email protected]
 *		[email protected]
 *
 * Original code
 * Copyright (C) 2004 Xilinx, Inc.
 */

/*
 * Here are the handlers which don't require enabling translation
 * and calling other kernel code, so we can keep their design very simple
 * and do all processing in real mode. All they need is a valid current
 * (that is an issue for the CONFIG_REGISTER_TASK_PTR case).
 * These handlers use r3, r4, r5, r6 and optionally r[current] to work,
 * therefore these registers are saved/restored.
 * The handlers which require translation are in entry.S --KAA
 *
 * Microblaze HW Exception Handler
 * - Non self-modifying exception handler for the following exception conditions
 *   - Unalignment
 *   - Instruction bus error
 *   - Data bus error
 *   - Illegal instruction opcode
 *   - Divide-by-zero
 *
 *   - Privileged instruction exception (MMU)
 *   - Data storage exception (MMU)
 *   - Instruction storage exception (MMU)
 *   - Data TLB miss exception (MMU)
 *   - Instruction TLB miss exception (MMU)
 *
 * Note we disable interrupts during exception handling, otherwise we will
 * possibly get multiple re-entrancy if interrupt handlers themselves cause
 * exceptions. JW
 */

#include <asm/exceptions.h>
#include <asm/unistd.h>
#include <asm/page.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>

#include <asm/mmu.h>
#include <asm/signal.h>
#include <asm/registers.h>
#include <asm/asm-offsets.h>

#undef DEBUG

/* Helpful Macros */
#define NUM_TO_REG(num)		r ## num

#define RESTORE_STATE			\
	lwi	r5, r1, 0;		\
	mts	rmsr, r5;		\
	nop;				\
	lwi	r3, r1, PT_R3;		\
	lwi	r4, r1, PT_R4;		\
	lwi	r5, r1, PT_R5;		\
	lwi	r6, r1, PT_R6;		\
	lwi	r11, r1, PT_R11;	\
	lwi	r31, r1, PT_R31;	\
	lwi	r1, r1, PT_R1;

#define LWREG_NOP			\
	bri	ex_handler_unhandled;	\
	nop;

#define SWREG_NOP			\
	bri	ex_handler_unhandled;	\
	nop;

/* r3 is the source */
#define R3_TO_LWREG_V(regnum)				\
	swi	r3, r1, 4 * regnum;			\
	bri	ex_handler_done;

/* r3 is the source */
#define R3_TO_LWREG(regnum)				\
	or	NUM_TO_REG (regnum), r0, r3;		\
	bri	ex_handler_done;

/* r3 is the target */
#define SWREG_TO_R3_V(regnum)				\
	lwi	r3, r1, 4 * regnum;			\
	bri	ex_sw_tail;

/* r3 is the target */
#define SWREG_TO_R3(regnum)				\
	or	r3, r0, NUM_TO_REG (regnum);		\
	bri	ex_sw_tail;

#define R3_TO_LWREG_VM_V(regnum)		\
	brid	ex_lw_end_vm;			\
	swi	r3, r7, 4 * regnum;

#define R3_TO_LWREG_VM(regnum)			\
	brid	ex_lw_end_vm;			\
	or	NUM_TO_REG (regnum), r0, r3;

#define SWREG_TO_R3_VM_V(regnum)		\
	brid	ex_sw_tail_vm;			\
	lwi	r3, r7, 4 * regnum;

#define SWREG_TO_R3_VM(regnum)			\
	brid	ex_sw_tail_vm;			\
	or	r3, r0, NUM_TO_REG (regnum);

/* Shift right instruction depending on available configuration */
#if CONFIG_XILINX_MICROBLAZE0_USE_BARREL == 0
/* Only the used shift constants defined here - add more if needed */
#define BSRLI2(rD, rA)				\
	srl rD, rA;		/* >> 1 */	\
	srl rD, rD;		/* >> 2 */
#define BSRLI4(rD, rA)		\
	BSRLI2(rD, rA);		\
	BSRLI2(rD, rD)
#define BSRLI10(rD, rA)				\
	srl rD, rA;		/* >> 1 */	\
	srl rD, rD;		/* >> 2 */	\
	srl rD, rD;		/* >> 3 */	\
	srl rD, rD;		/* >> 4 */	\
	srl rD, rD;		/* >> 5 */	\
	srl rD, rD;		/* >> 6 */	\
	srl rD, rD;		/* >> 7 */	\
	srl rD, rD;		/* >> 8 */	\
	srl rD, rD;		/* >> 9 */	\
	srl rD, rD		/* >> 10 */
#define BSRLI20(rD, rA)		\
	BSRLI10(rD, rA);	\
	BSRLI10(rD, rD)

	.macro	bsrli, rD, rA, IMM
	.if (\IMM) == 2
		BSRLI2(\rD, \rA)
	.elseif (\IMM) == 10
		BSRLI10(\rD, \rA)
	.elseif (\IMM) == 12
		BSRLI2(\rD, \rA)
		BSRLI10(\rD, \rD)
	.elseif (\IMM) == 14
		BSRLI4(\rD, \rA)
		BSRLI10(\rD, \rD)
	.elseif (\IMM) == 20
		BSRLI20(\rD, \rA)
	.elseif (\IMM) == 24
		BSRLI4(\rD, \rA)
		BSRLI20(\rD, \rD)
	.elseif (\IMM) == 28
		BSRLI4(\rD, \rA)
		BSRLI4(\rD, \rD)
		BSRLI20(\rD, \rD)
	.else
	.error "BSRLI shift macros \IMM"
	.endif
	.endm
#endif
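
/*
 * The bsrli macro above is only used when the CPU has no barrel shifter
 * (CONFIG_XILINX_MICROBLAZE0_USE_BARREL == 0): a right shift by IMM is
 * expanded into IMM single-bit 'srl' instructions. A rough C model of the
 * result (illustrative only, not built as part of this file):
 *
 *	static inline unsigned long bsrli_model(unsigned long a, unsigned int n)
 *	{
 *		unsigned long d = a;
 *		unsigned int i;
 *
 *		for (i = 0; i < n; i++)	// the macro emits one 'srl' per step
 *			d >>= 1;
 *		return d;		// equals a >> n
 *	}
 */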

	.extern other_exception_handler /* Defined in exception.c */

/*
 * hw_exception_handler - Handler for exceptions
 *
 * Exception handler notes:
 * - Handles all exceptions
 * - Does not handle unaligned exceptions during load into r17, r1, r0.
 * - Does not handle unaligned exceptions during store from r17 (cannot be
 *   done) and r1 (slows down common case)
 *
 * Relevant register structures
 *
 * EAR - |----|----|----|----|----|----|----|----|
 *     - <  ##     32-bit faulting address    ## >
 *
 * ESR - |----|----|----|----|----| - | - |-----|-----|
 *     -                            W   S   REG   EXC
 *
 *
 * STACK FRAME STRUCTURE (for CONFIG_MMU=n)
 * ----------------------------------------
 *
 *	+-------------+		+ 0
 *	|     MSR     |
 *	+-------------+		+ 4
 *	|     r1      |
 *	|      .      |
 *	|      .      |
 *	|      .      |
 *	|      .      |
 *	|     r18     |
 *	+-------------+		+ 76
 *	|      .      |
 *	|      .      |
 *
 * The MMU kernel uses the same 'pt_pool_space' area for storing register
 * values. The noMMU approach stored the values on the stack, but if the
 * handler failed, the register information was lost; with this scheme the
 * register values can always be inspected at a fixed place in memory.
 * Compared with the previous solution the speed should be the same.
 *
 * The MMU exception handler differs from the noMMU one: it uses a jump
 * table to dispatch the exception. This approach is better for the MMU
 * kernel because the MMU-related exceptions are handled by the asm code in
 * this file, whereas on noMMU everything except the unaligned exception is
 * handled by C code.
 */

/*
 * Each of these handlers is entered having R3/4/5/6/11/current saved on the
 * stack and then clobbered, so care should be taken to restore them if you
 * are going to return from the exception.
 */

/* wrappers to restore state before coming to entry.S */
	.section .data
	.align 4
pt_pool_space:
	.space	PT_SIZE

#ifdef DEBUG
/* Create space for exception counting. */
	.section .data
	.global exception_debug_table
	.align 4
exception_debug_table:
	/* Look at exception vector table. There are 32 exceptions * word size */
	.space	(32 * 4)
#endif /* DEBUG */

	.section .rodata
	.align 4
_MB_HW_ExceptionVectorTable:
/* 0 - Undefined */
	.long	TOPHYS(ex_handler_unhandled)
/* 1 - Unaligned data access exception */
	.long	TOPHYS(handle_unaligned_ex)
/* 2 - Illegal op-code exception */
	.long	TOPHYS(full_exception_trapw)
/* 3 - Instruction bus error exception */
	.long	TOPHYS(full_exception_trapw)
/* 4 - Data bus error exception */
	.long	TOPHYS(full_exception_trapw)
/* 5 - Divide by zero exception */
	.long	TOPHYS(full_exception_trapw)
/* 6 - Floating point unit exception */
	.long	TOPHYS(full_exception_trapw)
/* 7 - Privileged instruction exception */
	.long	TOPHYS(full_exception_trapw)
/* 8 - 15 - Undefined */
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
/* 16 - Data storage exception */
	.long	TOPHYS(handle_data_storage_exception)
/* 17 - Instruction storage exception */
	.long	TOPHYS(handle_instruction_storage_exception)
/* 18 - Data TLB miss exception */
	.long	TOPHYS(handle_data_tlb_miss_exception)
/* 19 - Instruction TLB miss exception */
	.long	TOPHYS(handle_instruction_tlb_miss_exception)
/* 20 - 31 - Undefined */
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
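
/*
 * Dispatch model: _hw_exception_handler below extracts ESR[EXC] (the low
 * five bits of ESR) and uses it to index the 32-entry table above. Roughly,
 * in C (illustrative only - the real code runs untranslated and uses
 * physical addresses via TOPHYS()):
 *
 *	typedef void (*mb_exc_handler_t)(void);
 *	extern mb_exc_handler_t _MB_HW_ExceptionVectorTable[32];
 *
 *	static void dispatch_exception(unsigned long esr)
 *	{
 *		unsigned int exc = esr & 0x1f;		// ESR[EXC]
 *
 *		_MB_HW_ExceptionVectorTable[exc]();	// byte offset = exc * 4
 *	}
 */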

	.global _hw_exception_handler
	.section .text
	.align 4
	.ent _hw_exception_handler
_hw_exception_handler:
	swi	r1, r0, TOPHYS(pt_pool_space + PT_R1); /* GET_SP */
	/* Save data to kernel memory. Here is the problem
	 * when you came from user space */
	ori	r1, r0, TOPHYS(pt_pool_space);
	swi	r3, r1, PT_R3
	swi	r4, r1, PT_R4
	swi	r5, r1, PT_R5
	swi	r6, r1, PT_R6

	swi	r11, r1, PT_R11
	swi	r31, r1, PT_R31
	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)) /* get saved current */

	mfs	r5, rmsr;
	nop
	swi	r5, r1, 0;
	mfs	r4, resr
	nop
	mfs	r3, rear;
	nop

	andi	r5, r4, 0x1F;		/* Extract ESR[EXC] */

	/* Calculate exception vector offset = r5 << 2 */
	addk	r6, r5, r5;		/* << 1 */
	addk	r6, r6, r6;		/* << 2 */

#ifdef DEBUG
/* count which exceptions happen */
	lwi	r5, r0, TOPHYS(exception_debug_table)
	addi	r5, r5, 1
	swi	r5, r0, TOPHYS(exception_debug_table)
	lwi	r5, r6, TOPHYS(exception_debug_table)
	addi	r5, r5, 1
	swi	r5, r6, TOPHYS(exception_debug_table)
#endif
/* end */
	/* Load the HW Exception vector */
	lwi	r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
	bra	r6

full_exception_trapw:
	RESTORE_STATE
	bri	full_exception_trap

/* 0x01 - Unaligned data access exception
 * This occurs when a word access is not aligned on a word boundary,
 * or when a 16-bit access is not aligned on a 16-bit boundary.
 * This handler performs the access, and returns, except for MMU when
 * the unaligned address is last on a 4k page or the physical address is
 * not found in the page table, in which case unaligned_data_trap is called.
 */
handle_unaligned_ex:
	/* Working registers already saved: R3, R4, R5, R6
	 * R4 = ESR
	 * R3 = EAR
	 */
	andi	r6, r4, 0x1000		/* Check ESR[DS] */
	beqi	r6, _no_delayslot	/* Branch if ESR[DS] not set */
	mfs	r17, rbtr;		/* ESR[DS] set - return address in BTR */
	nop
_no_delayslot:
	/* jump to high level unaligned handler */
	RESTORE_STATE;
	bri	unaligned_data_trap

	andi	r6, r4, 0x3E0;	/* Mask and extract the register operand */
	srl	r6, r6;		/* r6 >> 5 */
	srl	r6, r6;
	srl	r6, r6;
	srl	r6, r6;
	srl	r6, r6;
	/* Store the register operand in a temporary location */
	sbi	r6, r0, TOPHYS(ex_reg_op);

	andi	r6, r4, 0x400;	/* Extract ESR[S] */
	bnei	r6, ex_sw;
ex_lw:
	andi	r6, r4, 0x800;	/* Extract ESR[W] */
	beqi	r6, ex_lhw;
	lbui	r5, r3, 0;	/* Exception address in r3 */
	/* Load a word, byte-by-byte from destination address
	   and save it in tmp space */
	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_0);
	lbui	r5, r3, 1;
	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_1);
	lbui	r5, r3, 2;
	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_2);
	lbui	r5, r3, 3;
	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_3);
	/* Get the destination register value into r4 */
	lwi	r4, r0, TOPHYS(ex_tmp_data_loc_0);
	bri	ex_lw_tail;
ex_lhw:
	lbui	r5, r3, 0;	/* Exception address in r3 */
	/* Load a half-word, byte-by-byte from destination
	   address and save it in tmp space */
	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_0);
	lbui	r5, r3, 1;
	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_1);
	/* Get the destination register value into r4 */
	lhui	r4, r0, TOPHYS(ex_tmp_data_loc_0);
ex_lw_tail:
	/* Get the destination register number into r5 */
	lbui	r5, r0, TOPHYS(ex_reg_op);
	/* Form load_word jump table offset (lw_table + (8 * regnum)) */
	addik	r6, r0, TOPHYS(lw_table);
	addk	r5, r5, r5;
	addk	r5, r5, r5;
	addk	r5, r5, r5;
	addk	r5, r5, r6;
	bra	r5;
ex_lw_end: /* Exception handling of load word, ends */
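
/*
 * Jump-table dispatch used above and by ex_sw below: the register operand
 * is the ESR[REG] field ((ESR >> 5) & 0x1f), and each lw_table/sw_table
 * entry is exactly two 4-byte instructions, so the entry address is
 * table + regnum * 8 (hence the three doublings of r5). An illustrative C
 * model of the offset computation only:
 *
 *	static unsigned long lw_entry_address(unsigned long table,
 *					      unsigned long esr)
 *	{
 *		unsigned int regnum = (esr >> 5) & 0x1f;	// ESR[REG]
 *
 *		return table + regnum * 8;	// 8 bytes per table entry
 *	}
 */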
ex_sw:
	/* Get the destination register number into r5 */
	lbui	r5, r0, TOPHYS(ex_reg_op);
	/* Form store_word jump table offset (sw_table + (8 * regnum)) */
	addik	r6, r0, TOPHYS(sw_table);
	add	r5, r5, r5;
	add	r5, r5, r5;
	add	r5, r5, r5;
	add	r5, r5, r6;
	bra	r5;
ex_sw_tail:
	mfs	r6, resr;
	nop
	andi	r6, r6, 0x800;	/* Extract ESR[W] */
	beqi	r6, ex_shw;
	/* Get the word - delay slot */
	swi	r4, r0, TOPHYS(ex_tmp_data_loc_0);
	/* Store the word, byte-by-byte into destination address */
	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_0);
	sbi	r4, r3, 0;
	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_1);
	sbi	r4, r3, 1;
	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_2);
	sbi	r4, r3, 2;
	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_3);
	sbi	r4, r3, 3;
	bri	ex_handler_done;

ex_shw:
	/* Store the lower half-word, byte-by-byte into destination address */
	swi	r4, r0, TOPHYS(ex_tmp_data_loc_0);
	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_2);
	sbi	r4, r3, 0;
	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_3);
	sbi	r4, r3, 1;
ex_sw_end: /* Exception handling of store word, ends. */

ex_handler_done:
	RESTORE_STATE;
	rted	r17, 0
	nop
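
/*
 * The load/store paths above emulate the faulting access byte-by-byte, so
 * the alignment restriction is never violated. Roughly, for an unaligned
 * word load (illustrative C only; the handler works on physical addresses
 * and writes the result into the saved register via lw_table):
 *
 *	static unsigned int emulate_unaligned_lw(unsigned long ear)
 *	{
 *		union { unsigned char b[4]; unsigned int w; } tmp;
 *		unsigned int i;
 *
 *		for (i = 0; i < 4; i++)		// four aligned byte loads
 *			tmp.b[i] = *(volatile unsigned char *)(ear + i);
 *		return tmp.w;	// word reassembled in native byte order
 *	}
 */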

/* Exception vector entry code. This code runs with address translation
 * turned off (i.e. using physical addresses). */

/* Exception vectors. */

/* 0x10 - Data Storage Exception
 * This happens for just a few reasons. U0 set (but we don't do that),
 * or zone protection fault (user violation, write to protected page).
 * If this is just an update of modified status, we do that quickly
 * and exit. Otherwise, we call heavyweight functions to do the work.
 */
handle_data_storage_exception:
	/* Working registers already saved: R3, R4, R5, R6
	 * R3 = EAR, R4 = ESR
	 */
	mfs	r11, rpid
	nop
	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	ori	r5, r0, CONFIG_KERNEL_START
	cmpu	r5, r3, r5
	bgti	r5, ex3
	/* First, check if it was a zone fault (which means a user
	 * tried to access a kernel or read-protected page - always
	 * a SEGV). All other faults here must be stores, so no
	 * need to check ESR_S as well. */
	andi	r4, r4, ESR_DIZ		/* ESR_Z - zone protection */
	bnei	r4, ex2

	ori	r4, r0, swapper_pg_dir
	mts	rpid, r0		/* TLB will have 0 TID */
	nop
	bri	ex4

	/* Get the PGD for the current thread. */
ex3:
	/* First, check if it was a zone fault (which means a user
	 * tried to access a kernel or read-protected page - always
	 * a SEGV). All other faults here must be stores, so no
	 * need to check ESR_S as well. */
	andi	r4, r4, ESR_DIZ		/* ESR_Z */
	bnei	r4, ex2
	/* get current task address */
	addi	r4, CURRENT_TASK, TOPHYS(0);
	lwi	r4, r4, TASK_THREAD+PGDIR
ex4:
	tophys(r4,r4)
	/* Create L1 (pgdir/pmd) address */
	bsrli	r5, r3, PGDIR_SHIFT - 2
	andi	r5, r5, PAGE_SIZE - 4
	/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
	or	r4, r4, r5
	lwi	r4, r4, 0		/* Get L1 entry */
	andi	r5, r4, PAGE_MASK	/* Extract L2 (pte) base address */
	beqi	r5, ex2			/* Bail if no table */

	tophys(r5,r5)
	bsrli	r6, r3, PTE_SHIFT	/* Compute PTE address */
	andi	r6, r6, PAGE_SIZE - 4
	or	r5, r5, r6
	lwi	r4, r5, 0		/* Get Linux PTE */

	andi	r6, r4, _PAGE_RW	/* Is it writeable? */
	beqi	r6, ex2			/* Bail if not */

	/* Update 'changed' */
	ori	r4, r4, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
	swi	r4, r5, 0		/* Update Linux page table */

	/* Most of the Linux PTE is ready to load into the TLB LO.
	 * We set ZSEL, where only the LS-bit determines user access.
	 * We set execute, because we don't have the granularity to
	 * properly set this at the page level (Linux problem).
	 * If shared is set, we cause a zero PID->TID load.
	 * Many of these bits are software only. Bits we don't set
	 * here we (properly should) assume have the appropriate value.
	 */
/* Ignore memory coherent, just LSB on ZSEL is used + EX/WR */
	andi	r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
						TLB_ZSEL(1) | TLB_ATTR_MASK
	ori	r4, r4, _PAGE_HWEXEC	/* make it executable */

	/* find the TLB index that caused the fault. It has to be here */
	mts	rtlbsx, r3
	nop
	mfs	r5, rtlbx		/* DEBUG: TBD */
	nop
	mts	rtlblo, r4		/* Load TLB LO */
	nop
	/* Will sync shadow TLBs */

	/* Done...restore registers and get out of here. */
	mts	rpid, r11
	nop
	bri	4

	RESTORE_STATE;
	rted	r17, 0
	nop
ex2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out. */
	mts	rpid, r11
	nop
	bri	4
	RESTORE_STATE;
	bri	page_fault_data_trap

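/*
 * The TLB miss handlers below repeat the same two-level software page-table
 * walk that the data storage handler performs above. Approximately, in C
 * (illustrative only; the real code works on physical addresses and packs
 * the indexing into bsrli/andi/or exactly as written above):
 *
 *	static unsigned long pte_slot(unsigned long pgdir, unsigned long ear)
 *	{
 *		unsigned long l1, l2;
 *
 *		// L1: word-aligned pgdir offset derived from the fault address
 *		l1 = *(unsigned long *)(pgdir +
 *			((ear >> (PGDIR_SHIFT - 2)) & (PAGE_SIZE - 4)));
 *		l2 = l1 & PAGE_MASK;			// L2 (pte) table base
 *		if (!l2)
 *			return 0;			// no table -> bail out
 *
 *		// L2: word-aligned pte offset within the table
 *		return l2 + ((ear >> PTE_SHIFT) & (PAGE_SIZE - 4));
 *	}
 */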

/* 0x11 - Instruction Storage Exception
 * This is caused by a fetch from non-execute or guarded pages. */
handle_instruction_storage_exception:
	/* Working registers already saved: R3, R4, R5, R6
	 * R3 = ESR
	 */

	RESTORE_STATE;
	bri	page_fault_instr_trap

/* 0x12 - Data TLB Miss Exception
 * As the name implies, translation is not in the MMU, so search the
 * page tables and fix it. The only purpose of this function is to
 * load TLB entries from the page table if they exist.
 */
handle_data_tlb_miss_exception:
	/* Working registers already saved: R3, R4, R5, R6
	 * R3 = EAR, R4 = ESR
	 */
	mfs	r11, rpid
	nop

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables. */
	ori	r6, r0, CONFIG_KERNEL_START
	cmpu	r4, r3, r6
	bgti	r4, ex5
	ori	r4, r0, swapper_pg_dir
	mts	rpid, r0		/* TLB will have 0 TID */
	nop
	bri	ex6

	/* Get the PGD for the current thread. */
ex5:
	/* get current task address */
	addi	r4, CURRENT_TASK, TOPHYS(0);
	lwi	r4, r4, TASK_THREAD+PGDIR
ex6:
	tophys(r4,r4)
	/* Create L1 (pgdir/pmd) address */
	bsrli	r5, r3, PGDIR_SHIFT - 2
	andi	r5, r5, PAGE_SIZE - 4
	/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
	or	r4, r4, r5
	lwi	r4, r4, 0		/* Get L1 entry */
	andi	r5, r4, PAGE_MASK	/* Extract L2 (pte) base address */
	beqi	r5, ex7			/* Bail if no table */

	tophys(r5,r5)
	bsrli	r6, r3, PTE_SHIFT	/* Compute PTE address */
	andi	r6, r6, PAGE_SIZE - 4
	or	r5, r5, r6
	lwi	r4, r5, 0		/* Get Linux PTE */

	andi	r6, r4, _PAGE_PRESENT
	beqi	r6, ex7

	ori	r4, r4, _PAGE_ACCESSED
	swi	r4, r5, 0

	/* Most of the Linux PTE is ready to load into the TLB LO.
	 * We set ZSEL, where only the LS-bit determines user access.
	 * We set execute, because we don't have the granularity to
	 * properly set this at the page level (Linux problem).
	 * If shared is set, we cause a zero PID->TID load.
	 * Many of these bits are software only. Bits we don't set
	 * here we (properly should) assume have the appropriate value.
	 */
	brid	finish_tlb_load
	andi	r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
						TLB_ZSEL(1) | TLB_ATTR_MASK
ex7:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mts	rpid, r11
	nop
	bri	4
	RESTORE_STATE;
	bri	page_fault_data_trap

/* 0x13 - Instruction TLB Miss Exception
 * Nearly the same as above, except we get our information from
 * different registers and bailout to a different point.
 */
handle_instruction_tlb_miss_exception:
	/* Working registers already saved: R3, R4, R5, R6
	 * R3 = EAR (faulting address)
	 */
	mfs	r11, rpid
	nop

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	ori	r4, r0, CONFIG_KERNEL_START
	cmpu	r4, r3, r4
	bgti	r4, ex8
	ori	r4, r0, swapper_pg_dir
	mts	rpid, r0		/* TLB will have 0 TID */
	nop
	bri	ex9

	/* Get the PGD for the current thread. */
ex8:
	/* get current task address */
	addi	r4, CURRENT_TASK, TOPHYS(0);
	lwi	r4, r4, TASK_THREAD+PGDIR
ex9:
	tophys(r4,r4)
	/* Create L1 (pgdir/pmd) address */
	bsrli	r5, r3, PGDIR_SHIFT - 2
	andi	r5, r5, PAGE_SIZE - 4
	/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
	or	r4, r4, r5
	lwi	r4, r4, 0		/* Get L1 entry */
	andi	r5, r4, PAGE_MASK	/* Extract L2 (pte) base address */
	beqi	r5, ex10		/* Bail if no table */

	tophys(r5,r5)
	bsrli	r6, r3, PTE_SHIFT	/* Compute PTE address */
	andi	r6, r6, PAGE_SIZE - 4
	or	r5, r5, r6
	lwi	r4, r5, 0		/* Get Linux PTE */

	andi	r6, r4, _PAGE_PRESENT
	beqi	r6, ex10

	ori	r4, r4, _PAGE_ACCESSED
	swi	r4, r5, 0

	/* Most of the Linux PTE is ready to load into the TLB LO.
	 * We set ZSEL, where only the LS-bit determines user access.
	 * We set execute, because we don't have the granularity to
	 * properly set this at the page level (Linux problem).
	 * If shared is set, we cause a zero PID->TID load.
	 * Many of these bits are software only. Bits we don't set
	 * here we (properly should) assume have the appropriate value.
	 */
	brid	finish_tlb_load
	andi	r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
						TLB_ZSEL(1) | TLB_ATTR_MASK
ex10:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mts	rpid, r11
	nop
	bri	4
	RESTORE_STATE;
	bri	page_fault_instr_trap

/* Both the instruction and data TLB miss get to this point to load the TLB.
 *	r3 - EA of fault
 *	r4 - TLB LO (info from Linux PTE)
 *	r5, r6 - available to use
 *	PID - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 * A common place to load the TLB.
 */
	.section .data
	.align 4
	.global tlb_skip
tlb_skip:
	.long	MICROBLAZE_TLB_SKIP
tlb_index:
	/* MS: storing last used tlb index */
	.long	MICROBLAZE_TLB_SIZE/2
	.previous
finish_tlb_load:
	/* MS: load the last used TLB index. */
	lwi	r5, r0, TOPHYS(tlb_index)
	addik	r5, r5, 1	/* MS: inc tlb_index -> use next one */

	/* MS: FIXME this is potential fault, because this is mask not count */
	andi	r5, r5, MICROBLAZE_TLB_SIZE - 1
	ori	r6, r0, 1
	cmp	r31, r5, r6
	blti	r31, ex12
	lwi	r5, r0, TOPHYS(tlb_skip)
ex12:
	/* MS: save back current TLB index */
	swi	r5, r0, TOPHYS(tlb_index)

	ori	r4, r4, _PAGE_HWEXEC	/* make it executable */
	mts	rtlbx, r5		/* MS: save current TLB */
	nop
	mts	rtlblo, r4		/* MS: save to TLB LO */
	nop

	/* Create EPN. This is the faulting address plus a static
	 * set of bits. These are size, valid, E, U0, and ensure
	 * bits 20 and 21 are zero.
	 */
	andi	r3, r3, PAGE_MASK
	ori	r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_4K)
	mts	rtlbhi, r3		/* Load TLB HI */
	nop

	/* Done...restore registers and get out of here. */
	mts	rpid, r11
	nop
	bri	4
	RESTORE_STATE;
	rted	r17, 0
	nop
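
/*
 * Victim selection in finish_tlb_load above: a simple round-robin index,
 * kept in 'tlb_index', that never touches the first 'tlb_skip' (pinned)
 * TLB entries. An illustrative C model of the index update (note the asm
 * compares against the constant 1 rather than tlb_skip - see the FIXME
 * comment above):
 *
 *	extern unsigned int tlb_skip;	// the .data word defined above
 *
 *	static unsigned int next_tlb_victim(unsigned int index)
 *	{
 *		unsigned int next = (index + 1) & (MICROBLAZE_TLB_SIZE - 1);
 *
 *		if (next <= 1)		// wrapped into the pinned entries
 *			next = tlb_skip;
 *		return next;
 *	}
 */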

/* extern void giveup_fpu(struct task_struct *prev)
 *
 * The MicroBlaze processor may have an FPU, so this should not just
 * return: TBD.
 */
.globl giveup_fpu;
.align 4;
giveup_fpu:
	bralid	r15, 0			/* TBD */
	nop

/* At present, this routine just hangs. - extern void abort(void) */
.globl abort;
.align 4;
abort:
	br	r0

.globl set_context;
.align 4;
set_context:
	mts	rpid, r5		/* Shadow TLBs are automatically */
	nop
	bri	4			/* flushed by changing PID */
	rtsd	r15, 8
	nop

.end _hw_exception_handler

/* Unaligned data access exception last on a 4k page for MMU.
 * When this is called, we are in virtual mode with exceptions enabled
 * and registers 1-13,15,17,18 saved.
 *
 * R3 = ESR
 * R4 = EAR
 * R7 = pointer to saved registers (struct pt_regs *regs)
 *
 * This handler performs the access, and returns via ret_from_exc.
 */
.global _unaligned_data_exception
.ent _unaligned_data_exception
_unaligned_data_exception:
	andi	r8, r3, 0x3E0;	/* Mask and extract the register operand */
	bsrli	r8, r8, 2;	/* r8 >> 2 = register operand * 8 */
	andi	r6, r3, 0x400;	/* Extract ESR[S] */
	bneid	r6, ex_sw_vm;
	andi	r6, r3, 0x800;	/* Extract ESR[W] - delay slot */
ex_lw_vm:
	beqid	r6, ex_lhw_vm;
load1:	lbui	r5, r4, 0;	/* Exception address in r4 - delay slot */
/* Load a word, byte-by-byte from destination address and save it in tmp space */
	addik	r6, r0, ex_tmp_data_loc_0;
	sbi	r5, r6, 0;
load2:	lbui	r5, r4, 1;
	sbi	r5, r6, 1;
load3:	lbui	r5, r4, 2;
	sbi	r5, r6, 2;
load4:	lbui	r5, r4, 3;
	sbi	r5, r6, 3;
	brid	ex_lw_tail_vm;
/* Get the destination register value into r3 - delay slot */
	lwi	r3, r6, 0;
ex_lhw_vm:
	/* Load a half-word, byte-by-byte from destination address and
	 * save it in tmp space */
	addik	r6, r0, ex_tmp_data_loc_0;
	sbi	r5, r6, 0;
load5:	lbui	r5, r4, 1;
	sbi	r5, r6, 1;
	lhui	r3, r6, 0;	/* Get the destination register value into r3 */
ex_lw_tail_vm:
	/* Form load_word jump table offset (lw_table_vm + (8 * regnum)) */
	addik	r5, r8, lw_table_vm;
	bra	r5;
ex_lw_end_vm:			/* Exception handling of load word, ends */
	brai	ret_from_exc;
ex_sw_vm:
/* Form store_word jump table offset (sw_table_vm + (8 * regnum)) */
	addik	r5, r8, sw_table_vm;
	bra	r5;
ex_sw_tail_vm:
	addik	r5, r0, ex_tmp_data_loc_0;
	beqid	r6, ex_shw_vm;
	swi	r3, r5, 0;	/* Get the word - delay slot */
	/* Store the word, byte-by-byte into destination address */
	lbui	r3, r5, 0;
store1:	sbi	r3, r4, 0;
	lbui	r3, r5, 1;
store2:	sbi	r3, r4, 1;
	lbui	r3, r5, 2;
store3:	sbi	r3, r4, 2;
	lbui	r3, r5, 3;
	brid	ret_from_exc;
store4:	sbi	r3, r4, 3;	/* Delay slot */
ex_shw_vm:
	/* Store the lower half-word, byte-by-byte into destination address */
#ifdef __MICROBLAZEEL__
	lbui	r3, r5, 0;
store5:	sbi	r3, r4, 0;
	lbui	r3, r5, 1;
	brid	ret_from_exc;
store6:	sbi	r3, r4, 1;	/* Delay slot */
#else
	lbui	r3, r5, 2;
store5:	sbi	r3, r4, 0;
	lbui	r3, r5, 3;
	brid	ret_from_exc;
store6:	sbi	r3, r4, 1;	/* Delay slot */
#endif

ex_sw_end_vm:			/* Exception handling of store word, ends. */
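
/*
 * The entry code above turns ESR[REG] directly into a byte offset: the
 * field sits at bits 5-9, so (ESR & 0x3E0) >> 2 equals regnum * 8, which
 * is exactly the size of one lw_table_vm/sw_table_vm entry (two 4-byte
 * instructions). Illustrative C only:
 *
 *	static unsigned int vm_table_offset(unsigned long esr)
 *	{
 *		// (esr & 0x3E0) >> 2  ==  ((esr >> 5) & 0x1f) * 8
 *		return (esr & 0x3E0) >> 2;
 *	}
 */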

/* We have to prevent cases where the get/put_user macros get an unaligned
 * pointer into a bad page area. We have to find out which original
 * instruction caused the fault and call the fixup for that instruction,
 * not for an instruction inside the unaligned handler. */
ex_unaligned_fixup:
	ori	r5, r7, 0	/* setup pointer to pt_regs */
	lwi	r6, r7, PT_PC;	/* faulting address is one instruction above */
	addik	r6, r6, -4	/* for finding proper fixup */
	swi	r6, r7, PT_PC;	/* save it back to PT_PC */
	addik	r7, r0, SIGSEGV
	/* call bad_page_fault for finding aligned fixup, fixup address is saved
	 * in PT_PC which is used as return address from exception */
	addik	r15, r0, ret_from_exc-8	/* setup return address */
	brid	bad_page_fault
	nop

/* We register all of these loads/stores because any one of them could fault */
	.section __ex_table,"a";
	.word	load1, ex_unaligned_fixup;
	.word	load2, ex_unaligned_fixup;
	.word	load3, ex_unaligned_fixup;
	.word	load4, ex_unaligned_fixup;
	.word	load5, ex_unaligned_fixup;
	.word	store1, ex_unaligned_fixup;
	.word	store2, ex_unaligned_fixup;
	.word	store3, ex_unaligned_fixup;
	.word	store4, ex_unaligned_fixup;
	.word	store5, ex_unaligned_fixup;
	.word	store6, ex_unaligned_fixup;
	.previous;
.end _unaligned_data_exception
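
/*
 * The __ex_table entries above are (faulting insn, fixup) address pairs.
 * When one of the tagged loads/stores faults, the page-fault code looks the
 * faulting PC up in this table and, instead of oopsing, continues at the
 * fixup - here always ex_unaligned_fixup, which rewinds PT_PC by one
 * instruction so that the fixup of the original get/put_user access is
 * used. Conceptually (illustrative C only; the real lookup lives in the
 * generic extable code):
 *
 *	struct exception_table_entry { unsigned long insn, fixup; };
 *
 *	static unsigned long find_fixup(const struct exception_table_entry *tbl,
 *					unsigned int n, unsigned long pc)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < n; i++)
 *			if (tbl[i].insn == pc)
 *				return tbl[i].fixup;	// resume execution here
 *		return 0;				// no fixup found
 *	}
 */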

	.global ex_handler_unhandled
ex_handler_unhandled:
/* FIXME add handle function for unhandled exception - dump registers */
	bri	0

/*
 * hw_exception_handler Jump Table
 * - Contains code snippets for each register that caused the unaligned exception
 * - Hence exception handler is NOT self-modifying
 * - Separate table for load exceptions and store exceptions.
 * - Each table is of size: (8 * 32) = 256 bytes
 */

	.section .text
	.align 4
lw_table:
lw_r0:		R3_TO_LWREG	(0);
lw_r1:		LWREG_NOP;
lw_r2:		R3_TO_LWREG	(2);
lw_r3:		R3_TO_LWREG_V	(3);
lw_r4:		R3_TO_LWREG_V	(4);
lw_r5:		R3_TO_LWREG_V	(5);
lw_r6:		R3_TO_LWREG_V	(6);
lw_r7:		R3_TO_LWREG	(7);
lw_r8:		R3_TO_LWREG	(8);
lw_r9:		R3_TO_LWREG	(9);
lw_r10:		R3_TO_LWREG	(10);
lw_r11:		R3_TO_LWREG	(11);
lw_r12:		R3_TO_LWREG	(12);
lw_r13:		R3_TO_LWREG	(13);
lw_r14:		R3_TO_LWREG	(14);
lw_r15:		R3_TO_LWREG	(15);
lw_r16:		R3_TO_LWREG	(16);
lw_r17:		LWREG_NOP;
lw_r18:		R3_TO_LWREG	(18);
lw_r19:		R3_TO_LWREG	(19);
lw_r20:		R3_TO_LWREG	(20);
lw_r21:		R3_TO_LWREG	(21);
lw_r22:		R3_TO_LWREG	(22);
lw_r23:		R3_TO_LWREG	(23);
lw_r24:		R3_TO_LWREG	(24);
lw_r25:		R3_TO_LWREG	(25);
lw_r26:		R3_TO_LWREG	(26);
lw_r27:		R3_TO_LWREG	(27);
lw_r28:		R3_TO_LWREG	(28);
lw_r29:		R3_TO_LWREG	(29);
lw_r30:		R3_TO_LWREG	(30);
lw_r31:		R3_TO_LWREG_V	(31);

sw_table:
sw_r0:		SWREG_TO_R3	(0);
sw_r1:		SWREG_NOP;
sw_r2:		SWREG_TO_R3	(2);
sw_r3:		SWREG_TO_R3_V	(3);
sw_r4:		SWREG_TO_R3_V	(4);
sw_r5:		SWREG_TO_R3_V	(5);
sw_r6:		SWREG_TO_R3_V	(6);
sw_r7:		SWREG_TO_R3	(7);
sw_r8:		SWREG_TO_R3	(8);
sw_r9:		SWREG_TO_R3	(9);
sw_r10:		SWREG_TO_R3	(10);
sw_r11:		SWREG_TO_R3	(11);
sw_r12:		SWREG_TO_R3	(12);
sw_r13:		SWREG_TO_R3	(13);
sw_r14:		SWREG_TO_R3	(14);
sw_r15:		SWREG_TO_R3	(15);
sw_r16:		SWREG_TO_R3	(16);
sw_r17:		SWREG_NOP;
sw_r18:		SWREG_TO_R3	(18);
sw_r19:		SWREG_TO_R3	(19);
sw_r20:		SWREG_TO_R3	(20);
sw_r21:		SWREG_TO_R3	(21);
sw_r22:		SWREG_TO_R3	(22);
sw_r23:		SWREG_TO_R3	(23);
sw_r24:		SWREG_TO_R3	(24);
sw_r25:		SWREG_TO_R3	(25);
sw_r26:		SWREG_TO_R3	(26);
sw_r27:		SWREG_TO_R3	(27);
sw_r28:		SWREG_TO_R3	(28);
sw_r29:		SWREG_TO_R3	(29);
sw_r30:		SWREG_TO_R3	(30);
sw_r31:		SWREG_TO_R3_V	(31);

lw_table_vm:
lw_r0_vm:	R3_TO_LWREG_VM		(0);
lw_r1_vm:	R3_TO_LWREG_VM_V	(1);
lw_r2_vm:	R3_TO_LWREG_VM_V	(2);
lw_r3_vm:	R3_TO_LWREG_VM_V	(3);
lw_r4_vm:	R3_TO_LWREG_VM_V	(4);
lw_r5_vm:	R3_TO_LWREG_VM_V	(5);
lw_r6_vm:	R3_TO_LWREG_VM_V	(6);
lw_r7_vm:	R3_TO_LWREG_VM_V	(7);
lw_r8_vm:	R3_TO_LWREG_VM_V	(8);
lw_r9_vm:	R3_TO_LWREG_VM_V	(9);
lw_r10_vm:	R3_TO_LWREG_VM_V	(10);
lw_r11_vm:	R3_TO_LWREG_VM_V	(11);
lw_r12_vm:	R3_TO_LWREG_VM_V	(12);
lw_r13_vm:	R3_TO_LWREG_VM_V	(13);
lw_r14_vm:	R3_TO_LWREG_VM_V	(14);
lw_r15_vm:	R3_TO_LWREG_VM_V	(15);
lw_r16_vm:	R3_TO_LWREG_VM_V	(16);
lw_r17_vm:	R3_TO_LWREG_VM_V	(17);
lw_r18_vm:	R3_TO_LWREG_VM_V	(18);
lw_r19_vm:	R3_TO_LWREG_VM_V	(19);
lw_r20_vm:	R3_TO_LWREG_VM_V	(20);
lw_r21_vm:	R3_TO_LWREG_VM_V	(21);
lw_r22_vm:	R3_TO_LWREG_VM_V	(22);
lw_r23_vm:	R3_TO_LWREG_VM_V	(23);
lw_r24_vm:	R3_TO_LWREG_VM_V	(24);
lw_r25_vm:	R3_TO_LWREG_VM_V	(25);
lw_r26_vm:	R3_TO_LWREG_VM_V	(26);
lw_r27_vm:	R3_TO_LWREG_VM_V	(27);
lw_r28_vm:	R3_TO_LWREG_VM_V	(28);
lw_r29_vm:	R3_TO_LWREG_VM_V	(29);
lw_r30_vm:	R3_TO_LWREG_VM_V	(30);
lw_r31_vm:	R3_TO_LWREG_VM_V	(31);

sw_table_vm:
sw_r0_vm:	SWREG_TO_R3_VM		(0);
sw_r1_vm:	SWREG_TO_R3_VM_V	(1);
sw_r2_vm:	SWREG_TO_R3_VM_V	(2);
sw_r3_vm:	SWREG_TO_R3_VM_V	(3);
sw_r4_vm:	SWREG_TO_R3_VM_V	(4);
sw_r5_vm:	SWREG_TO_R3_VM_V	(5);
sw_r6_vm:	SWREG_TO_R3_VM_V	(6);
sw_r7_vm:	SWREG_TO_R3_VM_V	(7);
sw_r8_vm:	SWREG_TO_R3_VM_V	(8);
sw_r9_vm:	SWREG_TO_R3_VM_V	(9);
sw_r10_vm:	SWREG_TO_R3_VM_V	(10);
sw_r11_vm:	SWREG_TO_R3_VM_V	(11);
sw_r12_vm:	SWREG_TO_R3_VM_V	(12);
sw_r13_vm:	SWREG_TO_R3_VM_V	(13);
sw_r14_vm:	SWREG_TO_R3_VM_V	(14);
sw_r15_vm:	SWREG_TO_R3_VM_V	(15);
sw_r16_vm:	SWREG_TO_R3_VM_V	(16);
sw_r17_vm:	SWREG_TO_R3_VM_V	(17);
sw_r18_vm:	SWREG_TO_R3_VM_V	(18);
sw_r19_vm:	SWREG_TO_R3_VM_V	(19);
sw_r20_vm:	SWREG_TO_R3_VM_V	(20);
sw_r21_vm:	SWREG_TO_R3_VM_V	(21);
sw_r22_vm:	SWREG_TO_R3_VM_V	(22);
sw_r23_vm:	SWREG_TO_R3_VM_V	(23);
sw_r24_vm:	SWREG_TO_R3_VM_V	(24);
sw_r25_vm:	SWREG_TO_R3_VM_V	(25);
sw_r26_vm:	SWREG_TO_R3_VM_V	(26);
sw_r27_vm:	SWREG_TO_R3_VM_V	(27);
sw_r28_vm:	SWREG_TO_R3_VM_V	(28);
sw_r29_vm:	SWREG_TO_R3_VM_V	(29);
sw_r30_vm:	SWREG_TO_R3_VM_V	(30);
sw_r31_vm:	SWREG_TO_R3_VM_V	(31);

/* Temporary data structures used in the handler */
	.section .data
	.align 4
ex_tmp_data_loc_0:
	.byte 0
ex_tmp_data_loc_1:
	.byte 0
ex_tmp_data_loc_2:
	.byte 0
ex_tmp_data_loc_3:
	.byte 0
ex_reg_op:
	.byte 0