/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Early kernel startup code for Hexagon
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/mem-layout.h>
#include <asm/vm_mmu.h>
#include <asm/page.h>
#include <asm/hexagon_vm.h>

#define SEGTABLE_ENTRIES #0x0e0

	__INIT
ENTRY(stext)
	/*
	 * VMM will already have set up true vector page, MMU, etc.
	 * To set up initial kernel identity map, we have to pass
	 * the VMM a pointer to some canonical page tables. In
	 * this implementation, we're assuming that we've got
	 * them precompiled. Generate value in R24, as we'll need
	 * it again shortly.
	 */
	r24.L = #LO(swapper_pg_dir)
	r24.H = #HI(swapper_pg_dir)

	/*
	 * Symbol is kernel segment address, but we need
	 * the logical/physical address.
	 */
	r25 = pc;
	r2.h = #0xffc0;
	r2.l = #0x0000;
	r25 = and(r2,r25);	/* R25 holds PHYS_OFFSET now */
	r1.h = #HI(PAGE_OFFSET);
	r1.l = #LO(PAGE_OFFSET);
	r24 = sub(r24,r1);	/* swapper_pg_dir - PAGE_OFFSET */
	r24 = add(r24,r25);	/* + PHYS_OFFSET */

	r0 = r24;		/* aka __pa(swapper_pg_dir) */

	/*
	 * Initialize page dir to make the virtual and physical
	 * addresses where the kernel was loaded be identical.
	 * Done in 4MB chunks.
	 */
#define PTE_BITS ( __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X	\
		  | __HEXAGON_C_WB_L2 << 6			\
		  | __HVM_PDE_S_4MB)

	/*
	 * Get number of VA=PA entries; only really needed for jump
	 * to hyperspace; gets blown away immediately after
	 */
	{
		r1.l = #LO(_end);
		r2.l = #LO(stext);
		r3 = #1;
	}
	{
		r1.h = #HI(_end);
		r2.h = #HI(stext);
		r3 = asl(r3, #22);
	}
	{
		r1 = sub(r1, r2);
		r3 = add(r3, #-1);
	}			/* r1 = _end - stext */
	r1 = add(r1, r3);	/* + (4M-1) */
	r26 = lsr(r1, #22);	/* / 4M = # of entries */

	r1 = r25;
	r2.h = #0xffc0;
	r2.l = #0x0000;		/* round back down to 4MB boundary */
	r1 = and(r1,r2);
	r2 = lsr(r1, #22)	/* 4MB page number */
	r2 = asl(r2, #2)	/* times sizeof(PTE) (4bytes) */
	r0 = add(r0,r2)		/* r0 = address of correct PTE */
	r2 = #PTE_BITS
	r1 = add(r1,r2)		/* r1 = 4MB PTE for the first entry */
	r2.h = #0x0040
	r2.l = #0x0000		/* 4MB increments */
	loop0(1f,r26);
1:
	memw(r0 ++ #4) = r1
	{ r1 = add(r1, r2); } :endloop0

	/* Also need to overwrite the initial 0xc0000000 entries */
	/* PAGE_OFFSET >> (4MB shift - 4 bytes per entry shift) */
	R1.H = #HI(PAGE_OFFSET >> (22 - 2))
	R1.L = #LO(PAGE_OFFSET >> (22 - 2))

	r0 = add(r1, r24);	/* advance to 0xc0000000 entry */
	r1 = r25;
	r2.h = #0xffc0;
	r2.l = #0x0000;		/* round back down to 4MB boundary */
	r1 = and(r1,r2);	/* for huge page */
	r2 = #PTE_BITS
	r1 = add(r1,r2);
	r2.h = #0x0040
	r2.l = #0x0000		/* 4MB increments */

	loop0(1f,SEGTABLE_ENTRIES);
1:
	memw(r0 ++ #4) = r1;
	{ r1 = add(r1,r2); } :endloop0

	r0 = r24;

	/*
	 * The subroutine wrapper around the virtual instruction touches
	 * no memory, so we should be able to use it even here.
	 * Note that in this version, R1 and R2 get "clobbered"; see
	 * vm_ops.S
	 */
	r1 = #VM_TRANS_TYPE_TABLE
	call __vmnewmap;

	/* Jump into virtual address range. */

	r31.h = #hi(__head_s_vaddr_target)
	r31.l = #lo(__head_s_vaddr_target)
	jumpr r31

	/* Insert trippy space effects. */
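
	/*
	 * Execution resumes below at the kernel-virtual (linked) address
	 * of __head_s_vaddr_target.  State carried across the jump:
	 *	r24 = __pa(swapper_pg_dir)
	 *	r25 = PHYS_OFFSET (load address rounded down to 4MB)
	 *	r26 = number of temporary VA=PA entries to tear down
	 *
	 * For illustration only (the load address is not fixed): a kernel
	 * loaded at 0x10000000 gets its VA=PA mapping at segment table
	 * entry 0x10000000 >> 22 = 64, i.e. byte offset 64 * 4 = 256 into
	 * swapper_pg_dir, each entry being the 4MB-aligned physical
	 * address OR'd with PTE_BITS.
	 */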

__head_s_vaddr_target:
	/*
	 * Tear down VA=PA translation now that we are running
	 * in kernel virtual space.
	 */
	r0 = #__HVM_PDE_S_INVALID

	r1.h = #0xffc0;
	r1.l = #0x0000;
	r2 = r25;		/* phys_offset */
	r2 = and(r1,r2);

	r1.l = #lo(swapper_pg_dir)
	r1.h = #hi(swapper_pg_dir)
	r2 = lsr(r2, #22)	/* 4MB page number */
	r2 = asl(r2, #2)	/* times sizeof(PTE) (4bytes) */
	r1 = add(r1,r2);
	loop0(1f,r26)

1:
	{
		memw(R1 ++ #4) = R0
	}:endloop0

	r0 = r24
	r1 = #VM_TRANS_TYPE_TABLE
	call __vmnewmap

	/* Go ahead and install the trap0 return so angel calls work */
	r0.h = #hi(_K_provisional_vec)
	r0.l = #lo(_K_provisional_vec)
	call __vmsetvec

	/*
	 * OK, at this point we should start to be much more careful,
	 * we're going to enter C code and start touching memory
	 * in all sorts of places.
	 * This means:
	 *	SGP needs to be OK
	 *	Need to lock shared resources
	 *	A bunch of other things that will cause
	 *	all kinds of painful bugs
	 */

	/*
	 * Stack pointer should be pointed at the init task's
	 * thread stack, which should have been declared in arch/init_task.c.
	 * So uhhhhh...
	 * It's accessible via the init_thread_union, which is a union
	 * of a thread_info struct and a stack; of course, the top
	 * of the stack is not for you.  The end of the stack
	 * is simply init_thread_union + THREAD_SIZE.
	 */

	{r29.H = #HI(init_thread_union); r0.H = #HI(_THREAD_SIZE); }
	{r29.L = #LO(init_thread_union); r0.L = #LO(_THREAD_SIZE); }

	/* initialize the register used to point to current_thread_info */
	/* Fixme: THREADINFO_REG can't be R2 because of that memset thing. */
	{r29 = add(r29,r0); THREADINFO_REG = r29; }

	/* Hack: zero bss; */
	{ r0.L = #LO(__bss_start); r1 = #0; r2.l = #LO(__bss_stop); }
	{ r0.H = #HI(__bss_start); r2.h = #HI(__bss_stop); }

	r2 = sub(r2,r0);
	call memset;

	/* Set PHYS_OFFSET; should be in R25 */
#ifdef CONFIG_HEXAGON_PHYS_OFFSET
	r0.l = #LO(__phys_offset);
	r0.h = #HI(__phys_offset);
	memw(r0) = r25;
#endif

	/* Time to make the doughnuts. */
	call start_kernel

	/*
	 * Should not reach here.
	 */
1:
	jump 1b

.p2align PAGE_SHIFT
ENTRY(external_cmdline_buffer)
	.fill _PAGE_SIZE,1,0

	.data
.p2align PAGE_SHIFT
ENTRY(empty_zero_page)
	.fill _PAGE_SIZE,1,0
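
	/*
	 * Both buffers above are page-aligned and _PAGE_SIZE bytes long:
	 * empty_zero_page is the kernel's shared all-zeroes page, and
	 * external_cmdline_buffer is presumably where a boot command line
	 * handed in from outside the kernel image gets stashed.
	 */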