/*
 * arch/sh/kernel/head_64.S
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/init.h>

#include <asm/page.h>
#include <asm/cache.h>
#include <asm/tlb.h>
#include <cpu/registers.h>
#include <cpu/mmu_context.h>
#include <asm/thread_info.h>

/*
 * MMU defines: TLB boundaries.
 */

#define MMUIR_FIRST	ITLB_FIXED
#define MMUIR_END	ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUIR_STEP	TLB_STEP

#define MMUDR_FIRST	DTLB_FIXED
#define MMUDR_END	DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUDR_STEP	TLB_STEP

/* Safety check : CONFIG_PAGE_OFFSET has to be a multiple of 512Mb */
#if (CONFIG_PAGE_OFFSET & ((1UL<<29)-1))
#error "CONFIG_PAGE_OFFSET must be a multiple of 512Mb"
#endif

/*
 * MMU defines: Fixed TLBs.
 */
/* Deal safely with the case where the base of RAM is not 512Mb aligned */

#define ALIGN_512M_MASK		(0xffffffffe0000000)
#define ALIGNED_EFFECTIVE	((CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
#define ALIGNED_PHYSICAL	(CONFIG_MEMORY_START & ALIGN_512M_MASK)

#define MMUIR_TEXT_H	(0x0000000000000003 | ALIGNED_EFFECTIVE)
			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */

#define MMUIR_TEXT_L	(0x000000000000009a | ALIGNED_PHYSICAL)
			/* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */

#define MMUDR_CACHED_H	0x0000000000000003 | ALIGNED_EFFECTIVE
			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUDR_CACHED_L	0x000000000000015a | ALIGNED_PHYSICAL
			/* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */

#ifdef CONFIG_CACHE_OFF
#define ICCR0_INIT_VAL	ICCR0_OFF			/* ICACHE off */
#else
#define ICCR0_INIT_VAL	ICCR0_ON | ICCR0_ICI		/* ICE + ICI */
#endif
#define ICCR1_INIT_VAL	ICCR1_NOLOCK			/* No locking */

#if defined (CONFIG_CACHE_OFF)
#define OCCR0_INIT_VAL	OCCR0_OFF			/* D-cache: off */
#elif defined (CONFIG_CACHE_WRITETHROUGH)
#define OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WT	/* D-cache: on, */
							/* WT, invalidate */
#elif defined (CONFIG_CACHE_WRITEBACK)
#define OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WB	/* D-cache: on, */
							/* WB, invalidate */
#else
#error preprocessor flag CONFIG_CACHE_... not recognized!
#endif

#define OCCR1_INIT_VAL	OCCR1_NOLOCK			/* No locking */

	.section	.empty_zero_page, "aw"
	.global		empty_zero_page

empty_zero_page:
	.long	1		/* MOUNT_ROOT_RDONLY */
	.long	0		/* RAMDISK_FLAGS */
	.long	0x0200		/* ORIG_ROOT_DEV */
	.long	1		/* LOADER_TYPE */
	.long	0x00800000	/* INITRD_START */
	.long	0x00800000	/* INITRD_SIZE */
	.long	0

	.text
	.balign 4096,0,4096

	.section	.data, "aw"
	.balign	PAGE_SIZE

	.section	.data, "aw"
	.balign	PAGE_SIZE

	.global	mmu_pdtp_cache
mmu_pdtp_cache:
	.space PAGE_SIZE, 0

	.global	empty_bad_page
empty_bad_page:
	.space PAGE_SIZE, 0

	.global	empty_bad_pte_table
empty_bad_pte_table:
	.space PAGE_SIZE, 0

	.global	fpu_in_use
fpu_in_use:	.quad	0


	__HEAD
	.balign L1_CACHE_BYTES
/*
 * Condition at the entry of __stext:
 * . Reset state:
 *	. SR.FD    = 1		(FPU disabled)
 *	. SR.BL    = 1		(Exceptions disabled)
 *	. SR.MD    = 1		(Privileged Mode)
 *	. SR.MMU   = 0		(MMU Disabled)
 *	. SR.CD    = 0		(CTC User Visible)
 *	. SR.IMASK = Undefined	(Interrupt Mask)
 *
 * Operations supposed to be performed by __stext:
 * . prevent speculative fetch onto device memory while MMU is off
 * . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
 * . first, save CPU state and set it to something harmless
 * . any CPU detection and/or endianness settings (?)
 * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
 * . set initial TLB entries for cached and uncached regions
 *   (no fine granularity paging)
 * . set initial cache state
 * . enable MMU and caches
 * . set CPU to a consistent state
 *	. registers (including stack pointer and current/KCR0)
 *	. NOT expecting to set Exception handling nor VBR/RESVEC/DCR
 *	  at this stage. This is all left to later Linux initialization steps.
 *	. initialize FPU
 * . clear BSS
 * . jump into start_kernel()
 * . be prepared for the (hopeless) case that start_kernel() returns.
 *
 */
	.global	_stext
_stext:
	/*
	 * Prevent speculative fetch on device memory due to
	 * uninitialized target registers.
	 */
	ptabs/u	ZERO, tr0
	ptabs/u	ZERO, tr1
	ptabs/u	ZERO, tr2
	ptabs/u	ZERO, tr3
	ptabs/u	ZERO, tr4
	ptabs/u	ZERO, tr5
	ptabs/u	ZERO, tr6
	ptabs/u	ZERO, tr7
	synci

	/*
	 * Read/Set CPU state. After this block:
	 * r29 = Initial SR
	 */
	getcon	SR, r29
	movi	SR_HARMLESS, r20
	putcon	r20, SR

	/*
	 * Initialize EMI/LMI. To Be Done.
	 */

	/*
	 * CPU detection and/or endianness settings (?). To Be Done.
	 * Pure PIC code here, please ! Just save state into r30.
	 * After this block:
	 * r30 = CPU type/Platform Endianness
	 */

	/*
	 * Set initial TLB entries for cached and uncached regions.
	 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
	 */
	/* Clear ITLBs */
	pta	clear_ITLB, tr1
	movi	MMUIR_FIRST, r21
	movi	MMUIR_END, r22
clear_ITLB:
	putcfg	r21, 0, ZERO		/* Clear MMUIR[n].PTEH.V */
	addi	r21, MMUIR_STEP, r21
	bne	r21, r22, tr1

	/* Clear DTLBs */
	pta	clear_DTLB, tr1
	movi	MMUDR_FIRST, r21
	movi	MMUDR_END, r22
clear_DTLB:
	putcfg	r21, 0, ZERO		/* Clear MMUDR[n].PTEH.V */
	addi	r21, MMUDR_STEP, r21
	bne	r21, r22, tr1

	/* Map one big (512Mb) page for ITLB */
	movi	MMUIR_FIRST, r21
	movi	MMUIR_TEXT_L, r22	/* PTEL first */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 1, r22		/* Set MMUIR[0].PTEL */
	movi	MMUIR_TEXT_H, r22	/* PTEH last */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 0, r22		/* Set MMUIR[0].PTEH */

	/* Map one big CACHED (512Mb) page for DTLB */
	movi	MMUDR_FIRST, r21
	movi	MMUDR_CACHED_L, r22	/* PTEL first */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 1, r22		/* Set MMUDR[0].PTEL */
	movi	MMUDR_CACHED_H, r22	/* PTEH last */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 0, r22		/* Set MMUDR[0].PTEH */

	/*
	 * Setup a DTLB translation for SCIF phys.
	 */
	addi	r21, MMUDR_STEP, r21
	movi	0x0a03, r22		/* SCIF phys */
	shori	0x0148, r22
	putcfg	r21, 1, r22		/* PTEL first */
	movi	0xfa03, r22		/* 0xfa030000, fixed SCIF virt */
	shori	0x0003, r22
	putcfg	r21, 0, r22		/* PTEH last */

	/*
	 * Set cache behaviours.
	 */
	/* ICache */
	movi	ICCR_BASE, r21
	movi	ICCR0_INIT_VAL, r22
	movi	ICCR1_INIT_VAL, r23
	putcfg	r21, ICCR_REG0, r22
	putcfg	r21, ICCR_REG1, r23

	/* OCache */
	movi	OCCR_BASE, r21
	movi	OCCR0_INIT_VAL, r22
	movi	OCCR1_INIT_VAL, r23
	putcfg	r21, OCCR_REG0, r22
	putcfg	r21, OCCR_REG1, r23


	/*
	 * Enable Caches and MMU.
	 * Do the first non-PIC jump.
	 * Now head.S global variables, constants and externs
	 * can be used.
	 */
	getcon	SR, r21
	movi	SR_ENABLE_MMU, r22
	or	r21, r22, r21
	putcon	r21, SSR
	movi	hyperspace, r22
	ori	r22, 1, r22		/* Make it SHmedia, not required but.. */
	putcon	r22, SPC
	synco
	rte				/* And now go into the hyperspace ... */
hyperspace:				/* ... that's the next instruction ! */

	/*
	 * Set CPU to a consistent state.
	 * r31 = FPU support flag
	 * tr0/tr7 in use. Others give a chance to loop somewhere safe
	 */
	movi	start_kernel, r32
	ori	r32, 1, r32

	ptabs	r32, tr0		/* r32 = _start_kernel address */
	pta/u	hopeless, tr1
	pta/u	hopeless, tr2
	pta/u	hopeless, tr3
	pta/u	hopeless, tr4
	pta/u	hopeless, tr5
	pta/u	hopeless, tr6
	pta/u	hopeless, tr7
	gettr	tr1, r28		/* r28 = hopeless address */

	/* Set initial stack pointer */
	movi	init_thread_union, SP
	putcon	SP, KCR0		/* Set current to init_task */
	movi	THREAD_SIZE, r22	/* Point to the end */
	add	SP, r22, SP

	/*
	 * Initialize FPU.
	 * Keep FPU flag in r31. After this block:
	 * r31 = FPU flag
	 */
	movi	fpu_in_use, r31		/* Temporary */

#ifdef CONFIG_SH_FPU
	getcon	SR, r21
	movi	SR_ENABLE_FPU, r22
	and	r21, r22, r22
	putcon	r22, SR			/* Try to enable */
	getcon	SR, r22
	xor	r21, r22, r21
	shlri	r21, 15, r21		/* Supposedly 0/1 */
	st.q	r31, 0, r21		/* Set fpu_in_use */
#else
	movi	0, r21
	st.q	r31, 0, r21		/* Set fpu_in_use */
#endif
	or	r21, ZERO, r31		/* Set FPU flag at last */

#ifndef CONFIG_SH_NO_BSS_INIT
/* Don't clear BSS if running on slow platforms such as an RTL simulation,
   remote memory via SHdebug link, etc.  For these the memory can be
   guaranteed to be all zero on boot anyway. */
	/*
	 * Clear bss
	 */
	pta	clear_quad, tr1
	movi	__bss_start, r22
	movi	_end, r23
clear_quad:
	st.q	r22, 0, ZERO
	addi	r22, 8, r22
	bne	r22, r23, tr1		/* Both quad aligned, see vmlinux.lds.S */
#endif
	pta/u	hopeless, tr1

	/* Say bye to head.S but be prepared to wrongly get back ... */
	blink	tr0, LINK

	/* If we ever get back here through LINK/tr1-tr7 */
	pta/u	hopeless, tr7

hopeless:
	/*
	 * Something's badly wrong here. Loop endlessly,
	 * there's nothing more we can do about it.
	 *
	 * Note on hopeless: it can be jumped into either
	 * before or after jumping into hyperspace. The only
	 * requirement is to be PIC called (PTA) before and
	 * any way (PTA/PTABS) after. From the virtual-to-physical
	 * mapping, a simulator/emulator can easily tell where we
	 * came from just by looking at the hopeless (PC) address.
	 *
	 * For debugging purposes:
	 * (r28) hopeless/loop address
	 * (r29) Original SR
	 * (r30) CPU type/Platform endianness
	 * (r31) FPU Support
	 * (r32) _start_kernel address
	 */
	blink	tr7, ZERO