/* arch/x86/boot/compressed/head_64.S */
/*
 *  linux/boot/head.S
 *
 *  Copyright (C) 1991, 1992, 1993  Linus Torvalds
 */

/*
 *  head.S contains the 32-bit startup code.
 *
 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
 * the page directory will exist. The startup code will be overwritten by
 * the page directory. [According to comments etc elsewhere on a compressed
 * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
 *
 * Page 0 is deliberately kept safe, since System Management Mode code in
 * laptops may need to access the BIOS data stored there.  This is also
 * useful for future device drivers that access the BIOS via VM86 mode.
 */

/*
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
	.code32
	.text

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/pgtable_types.h>
#include <asm/page_types.h>
#include <asm/boot.h>
#include <asm/msr.h>
#include <asm/processor-flags.h>
#include <asm/asm-offsets.h>

	__HEAD
	.code32
ENTRY(startup_32)
	cld
	/*
	 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
	 * us to not reload segments
	 */
	testb	$(1<<6), BP_loadflags(%esi)
	jnz	1f

	cli
	movl	$(__KERNEL_DS), %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %ss
1:

	/*
	 * Calculate the delta between where we were compiled to run
	 * at and where we were actually loaded at.  This can only be done
	 * with a short local call on x86.  Nothing else will tell us what
	 * address we are running at.  The reserved chunk of the real-mode
	 * data at 0x1e4 (defined as a scratch field) are used as the stack
	 * for this calculation.  Only 4 bytes are needed.
	 */
	leal	(BP_scratch+4)(%esi), %esp
	call	1f
1:	popl	%ebp
	subl	$1b, %ebp		/* %ebp = load address delta */

	/* setup a stack and make sure cpu supports long mode. */
	movl	$boot_stack_end, %eax
	addl	%ebp, %eax
	movl	%eax, %esp

	call	verify_cpu
	testl	%eax, %eax
	jnz	no_longmode

/*
 * Compute the delta between where we were compiled to run at
 * and where the code will actually run at.
 *
 * %ebp contains the address we are loaded at by the boot loader and %ebx
 * contains the address where we should move the kernel image temporarily
 * for safe in-place decompression.
 */

#ifdef CONFIG_RELOCATABLE
	movl	%ebp, %ebx
	/* Round the load address up to the kernel alignment boundary */
	movl	BP_kernel_alignment(%esi), %eax
	decl	%eax
	addl	%eax, %ebx
	notl	%eax
	andl	%eax, %ebx
#else
	movl	$LOAD_PHYSICAL_ADDR, %ebx
#endif

	/* Target address to relocate to for decompression */
	addl	$z_extract_offset, %ebx

/*
 * Prepare for entering 64 bit mode
 */

	/* Load new GDT with the 64bit segments using 32bit descriptor */
	leal	gdt(%ebp), %eax
	movl	%eax, gdt+2(%ebp)	/* patch the GDT base pointer in place */
	lgdt	gdt(%ebp)

	/* Enable PAE mode (required before setting EFER.LME) */
	movl	$(X86_CR4_PAE), %eax
	movl	%eax, %cr4

 /*
  * Build early 4G boot pagetable
  */
	/* Initialize Page tables to 0 (6 pages of 4096 bytes, stored as dwords) */
	leal	pgtable(%ebx), %edi
	xorl	%eax, %eax
	movl	$((4096*6)/4), %ecx
	rep	stosl

	/* Build Level 4: one entry -> the level-3 table at pgtable+0x1000 */
	leal	pgtable + 0(%ebx), %edi
	leal	0x1007 (%edi), %eax	/* +0x1000 table, low 7 = present+write+user */
	movl	%eax, 0(%edi)

	/* Build Level 3: 4 entries -> the level-2 tables at pgtable+0x2000.. */
	leal	pgtable + 0x1000(%ebx), %edi
	leal	0x1007(%edi), %eax
	movl	$4, %ecx
1:	movl	%eax, 0x00(%edi)
	addl	$0x00001000, %eax
	addl	$8, %edi
	decl	%ecx
	jnz	1b

	/* Build Level 2: 2048 x 2MB identity-mapped pages = 4GB total */
	leal	pgtable + 0x2000(%ebx), %edi
	movl	$0x00000183, %eax	/* present+write+PS(2M)+global */
	movl	$2048, %ecx
1:	movl	%eax, 0(%edi)
	addl	$0x00200000, %eax	/* next 2MB physical frame */
	addl	$8, %edi
	decl	%ecx
	jnz	1b

	/* Enable the boot page tables */
	leal	pgtable(%ebx), %eax
	movl	%eax, %cr3

	/* Enable Long mode in EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_LME, %eax
	wrmsr

	/*
	 * Setup for the jump to 64bit mode
	 *
	 * When the jump is performed we will be in long mode but
	 * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
	 * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 * We place all of the values on our mini stack so lret can
	 * be used to perform that far jump.
	 */
	pushl	$__KERNEL_CS
	leal	startup_64(%ebp), %eax
	pushl	%eax

	/* Enter paged protected Mode, activating Long Mode */
	movl	$(X86_CR0_PG | X86_CR0_PE), %eax /* Enable Paging and Protected mode */
	movl	%eax, %cr0

	/* Jump from 32bit compatibility mode into 64bit mode. */
	lret
ENDPROC(startup_32)

no_longmode:
	/* This isn't an x86-64 CPU so hang */
1:
	hlt
	jmp     1b

#include "../../kernel/verify_cpu.S"

	/*
	 * Be careful here startup_64 needs to be at a predictable
	 * address so I can export it in an ELF header.  Bootloaders
	 * should look at the ELF header to find this address, as
	 * it may change in the future.
	 */
	.code64
	.org 0x200
ENTRY(startup_64)
	/*
	 * We come here either from startup_32 or directly from a
	 * 64bit bootloader.  If we come here from a bootloader we depend on
	 * an identity mapped page table being provided that maps our
	 * entire text+data+bss and hopefully all of memory.
	 */

	/* Setup data segments. */
	xorl	%eax, %eax		/* null selector for all data segments */
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %ss
	movl	%eax, %fs
	movl	%eax, %gs
	lldt	%ax			/* no LDT */
	movl    $0x20, %eax
	ltr	%ax			/* TSS descriptor is GDT entry 4 (sel 0x20) */

	/*
	 * Compute the decompressed kernel start address.  It is where
	 * we were loaded at aligned to a 2M boundary. %rbp contains the
	 * decompressed kernel start address.
	 *
	 * If it is a relocatable kernel then decompress and run the kernel
	 * from load address aligned to 2MB addr, otherwise decompress and
	 * run the kernel from LOAD_PHYSICAL_ADDR
	 *
	 * We cannot rely on the calculation done in 32-bit mode, since we
	 * may have been invoked via the 64-bit entry point.
	 */

	/* Start with the delta to where the kernel will run at. */
#ifdef CONFIG_RELOCATABLE
	leaq	startup_32(%rip) /* - $startup_32 */, %rbp
	movl	BP_kernel_alignment(%rsi), %eax
	decl	%eax
	addq	%rax, %rbp
	notq	%rax
	andq	%rax, %rbp		/* round %rbp up to kernel_alignment */
#else
	movq	$LOAD_PHYSICAL_ADDR, %rbp
#endif

	/* Target address to relocate to for decompression */
	leaq	z_extract_offset(%rbp), %rbx

	/* Set up the stack */
	leaq	boot_stack_end(%rbx), %rsp

	/* Zero EFLAGS */
	pushq	$0
	popfq

/*
 * Copy the compressed kernel to the end of our buffer
 * where decompression in place becomes safe.
 */
	pushq	%rsi			/* preserve real-mode data pointer */
	leaq	(_bss-8)(%rip), %rsi
	leaq	(_bss-8)(%rbx), %rdi
	movq	$_bss /* - $startup_32 */, %rcx
	shrq	$3, %rcx		/* byte count -> qword count */
	std				/* copy backwards: src/dst may overlap */
	rep	movsq
	cld
	popq	%rsi

/*
 * Jump to the relocated address.
 */
	leaq	relocated(%rbx), %rax
	jmp	*%rax

	.text
relocated:

/*
 * Clear BSS (stack is currently empty)
 */
	xorl	%eax, %eax
	leaq    _bss(%rip), %rdi
	leaq    _ebss(%rip), %rcx
	subq	%rdi, %rcx
	shrq	$3, %rcx
	rep	stosq

/*
 * Adjust our own GOT: add the runtime load offset (%rbx) to each entry
 */
	leaq	_got(%rip), %rdx
	leaq	_egot(%rip), %rcx
1:
	cmpq	%rcx, %rdx
	jae	2f
	addq	%rbx, (%rdx)
	addq	$8, %rdx
	jmp	1b
2:
	
/*
 * Do the decompression, and jump to the new kernel..
 * (SysV AMD64 argument registers: rdi, rsi, rdx, rcx, r8)
 */
	pushq	%rsi			/* Save the real mode argument */
	movq	%rsi, %rdi		/* real mode address */
	leaq	boot_heap(%rip), %rsi	/* malloc area for decompression */
	leaq	input_data(%rip), %rdx  /* input_data */
	movl	$z_input_len, %ecx	/* input_len */
	movq	%rbp, %r8		/* output target address */
	call	decompress_kernel
	popq	%rsi

/*
 * Jump to the decompressed kernel.
 */
	jmp	*%rbp

	.data
gdt:
	.word	gdt_end - gdt		/* GDT limit */
	.long	gdt			/* base, patched at runtime by startup_32 */
	.word	0
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
	.quad	0x0080890000000000	/* TS descriptor */
	.quad   0x0000000000000000	/* TS continued */
gdt_end:

/*
 * Stack and heap for decompression
 */
	.bss
	.balign 4
boot_heap:
	.fill BOOT_HEAP_SIZE, 1, 0
boot_stack:
	.fill BOOT_STACK_SIZE, 1, 0
boot_stack_end:

/*
 * Space for page tables (not in .bss so not zeroed)
 */
	.section ".pgtable","a",@nobits
	.balign 4096
pgtable:
	.fill 6*4096, 1, 0