/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 * Copyright (C) 2000 Andrea Arcangeli <[email protected]> SuSE
 * Copyright (C) 2000 Pavel Machek <[email protected]>
 * Copyright (C) 2000 Karsten Keil <[email protected]>
 * Copyright (C) 2001,2002 Andi Kleen <[email protected]>
 * Copyright (C) 2005 Eric Biederman <[email protected]>
 */

#include <linux/export.h>
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/nospec-branch.h>
#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/smp.h>
#include <asm/thread_info.h>

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_END_OF_STACK
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us. These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %RSI holds the physical address of the boot_params structure
	 * provided by the bootloader. Preserve it in %R15 so C function calls
	 * will not clobber it.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */
	mov	%rsi, %r15

	/* Set up the stack for verify_cpu() */
	leaq	__top_init_kernel_stack(%rip), %rsp

	/*
	 * Set up GSBASE.
	 * Note that on SMP the boot CPU uses the init data section until
	 * the per-CPU areas are set up.
	 */
	movl	$MSR_GS_BASE, %ecx
	xorl	%eax, %eax
	xorl	%edx, %edx
	wrmsr

	call	startup_64_setup_gdt_idt

	/* Now switch to __KERNEL_CS so IRET works reliably */
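	/*
	 * lretq pops the new RIP and then the new CS selector from the
	 * stack, so pushing __KERNEL_CS followed by the target address
	 * builds a far-return frame that reloads CS and continues at
	 * .Lon_kernel_cs.
	 */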
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq

.Lon_kernel_cs:
	ANNOTATE_NOENDBR
	UNWIND_HINT_END_OF_STACK

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Activate SEV/SME memory encryption if supported/enabled. This needs to
	 * be done now, since this also includes setup of the SEV-SNP CPUID table,
	 * which needs to be done before any CPUID instructions are executed in
	 * subsequent code. Pass the boot_params pointer as the first argument.
	 */
	movq	%r15, %rdi
	call	sme_enable
#endif

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * Derive the kernel's physical-to-virtual offset from the physical and
	 * virtual addresses of common_startup_64().
	 */
	leaq	common_startup_64(%rip), %rdi
	subq	.Lcommon_startup_64(%rip), %rdi

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	movq	%r15, %rsi
	call	__startup_64

	/* Form the CR3 value, being sure to include the CR3 modifier */
	leaq	early_top_pgt(%rip), %rcx
	addq	%rcx, %rax

#ifdef CONFIG_AMD_MEM_ENCRYPT
	mov	%rax, %rdi

	/*
	 * For SEV guests: Verify that the C-bit is correct. A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and
	 * waiting for the next RET instruction.
	 */
	call	sev_verify_cbit
#endif

	/*
	 * Switch to early_top_pgt, which still has the identity mappings
	 * present.
	 */
	movq	%rax, %cr3

	/* Branch to the common startup code at its kernel virtual address */
	ANNOTATE_RETPOLINE_SAFE
	jmp	*.Lcommon_startup_64(%rip)
SYM_CODE_END(startup_64)

	__INITRODATA
SYM_DATA_LOCAL(.Lcommon_startup_64, .quad common_startup_64)

	.text
SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU bringup.
	 *
	 * All non-SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR

	/* Clear %R15, which holds the boot_params pointer on the boot CPU */
	xorl	%r15d, %r15d

	/* Derive the runtime physical address of init_top_pgt[] */
	movq	phys_base(%rip), %rax
	addq	$(init_top_pgt - __START_KERNEL_map), %rax

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	addq	sme_me_mask(%rip), %rax
#endif
	/*
	 * Switch to the init_top_pgt here, away from the trampoline_pgd, and
	 * unmap the identity mapped ranges.
	 */
	movq	%rax, %cr3

SYM_INNER_LABEL(common_startup_64, SYM_L_LOCAL)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR

	/*
	 * Create a mask of CR4 bits to preserve. Omit PGE in order to flush
	 * global 1:1 translations from the TLBs.
	 *
	 * From the SDM:
	 * "If CR4.PGE is changing from 0 to 1, there were no global TLB
	 *  entries before the execution; if CR4.PGE is changing from 1 to 0,
	 *  there will be no global TLB entries after the execution."
	 */
	movl	$(X86_CR4_PAE | X86_CR4_LA57), %edx
#ifdef CONFIG_X86_MCE
	/*
	 * Preserve CR4.MCE if the kernel will enable #MC support.
	 * Clearing MCE may fault in some environments (that also force #MC
	 * support). Any machine check that occurs before #MC support is fully
	 * configured will crash the system regardless of the CR4.MCE value set
	 * here.
	 */
	orl	$X86_CR4_MCE, %edx
#endif
	movq	%cr4, %rcx
	andl	%edx, %ecx

	/* Even if ignored in long mode, set PSE uniformly on all logical CPUs. */
	btsl	$X86_CR4_PSE_BIT, %ecx
	movq	%rcx, %cr4

	/*
	 * Set CR4.PGE to re-enable global translations.
	 */
	btsl	$X86_CR4_PGE_BIT, %ecx
	movq	%rcx, %cr4

#ifdef CONFIG_SMP
	/*
	 * For parallel boot, the APIC ID is read from the APIC, and then
	 * used to look up the CPU number. For booting a single CPU, the
	 * CPU number is encoded in smpboot_control.
	 *
	 * Bit 31	STARTUP_READ_APICID (Read APICID from APIC)
	 * Bit 0-23	CPU# if STARTUP_xx flags are not set
	 */
	movl	smpboot_control(%rip), %ecx
	testl	$STARTUP_READ_APICID, %ecx
	jnz	.Lread_apicid
	/*
	 * No control bit set, single CPU bringup. The CPU number is provided
	 * in bits 0-23. This is also the boot CPU case (CPU number 0).
	 */
	andl	$(~STARTUP_PARALLEL_MASK), %ecx
	jmp	.Lsetup_cpu

.Lread_apicid:
	/* Check whether X2APIC mode is already enabled */
	mov	$MSR_IA32_APICBASE, %ecx
	rdmsr
	testl	$X2APIC_ENABLE, %eax
	jnz	.Lread_apicid_msr

#ifdef CONFIG_X86_X2APIC
	/*
	 * If the system is in X2APIC mode then the MMIO base might not be
	 * mapped, causing the MMIO read below to fault. Faults can't
	 * be handled at that point.
	 */
	cmpl	$0, x2apic_mode(%rip)
	jz	.Lread_apicid_mmio

	/* Force the AP into X2APIC mode. */
	orl	$X2APIC_ENABLE, %eax
	wrmsr
	jmp	.Lread_apicid_msr
#endif

.Lread_apicid_mmio:
	/* Read the APIC ID from the fix-mapped MMIO space. */
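	/*
	 * In xAPIC (MMIO) mode the APIC ID register carries the ID in
	 * bits 31:24, which is why the value read below is shifted right
	 * by 24 before the CPU number lookup.
	 */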
	movq	apic_mmio_base(%rip), %rcx
	addq	$APIC_ID, %rcx
	movl	(%rcx), %eax
	shr	$24, %eax
	jmp	.Llookup_AP

.Lread_apicid_msr:
	mov	$APIC_X2APIC_ID_MSR, %ecx
	rdmsr

.Llookup_AP:
	/* EAX contains the APIC ID of the current CPU */
	xorl	%ecx, %ecx
	leaq	cpuid_to_apicid(%rip), %rbx

.Lfind_cpunr:
	cmpl	(%rbx,%rcx,4), %eax
	jz	.Lsetup_cpu
	inc	%ecx
#ifdef CONFIG_FORCE_NR_CPUS
	cmpl	$NR_CPUS, %ecx
#else
	cmpl	nr_cpu_ids(%rip), %ecx
#endif
	jb	.Lfind_cpunr

	/* APIC ID not found in the table. Drop the trampoline lock and bail. */
	movq	trampoline_lock(%rip), %rax
	movl	$0, (%rax)

1:	cli
	hlt
	jmp	1b

.Lsetup_cpu:
	/* Get the per-cpu offset for the given CPU#, which is in ECX */
	movq	__per_cpu_offset(,%rcx,8), %rdx
#else
	xorl	%edx, %edx /* zero-extended to clear all of RDX */
#endif /* CONFIG_SMP */

	/*
	 * Set up a boot time stack - any secondary CPU will have lost its stack
	 * by now because the cr3 switch above unmaps the real-mode stack.
	 *
	 * RDX contains the per-cpu offset
	 */
	movq	current_task(%rdx), %rax
	movq	TASK_threadsp(%rax), %rsp

	/*
	 * Now that this CPU is running on its own stack, drop the realmode
	 * protection. For the boot CPU the pointer is NULL!
	 */
	movq	trampoline_lock(%rip), %rax
	testq	%rax, %rax
	jz	.Lsetup_gdt
	movl	$0, (%rax)

.Lsetup_gdt:
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
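	/*
	 * lgdt takes a pseudo-descriptor: a 16-bit limit followed by a
	 * 64-bit linear base address. It is built on the stack below, with
	 * the limit at (%rsp) and the base at 2(%rsp), inside a 16-byte
	 * scratch area that is released again right after the load.
	 */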
	subq	$16, %rsp
	movw	$(GDT_SIZE-1), (%rsp)
	leaq	gdt_page(%rdx), %rax
	movq	%rax, 2(%rsp)
	lgdt	(%rsp)
	addq	$16, %rsp

	/* set up data segments */
	xorl	%eax, %eax
	movl	%eax, %ds
	movl	%eax, %ss
	movl	%eax, %es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors. This allows execution
	 * under VT hardware.
	 */
	movl	%eax, %fs
	movl	%eax, %gs

	/*
	 * Set up GSBASE.
	 * Note that, on SMP, the boot CPU uses the init data section until
	 * the per-CPU areas are set up.
	 */
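	/*
	 * wrmsr writes EDX:EAX into the MSR selected by ECX, so the 64-bit
	 * per-CPU offset held in RDX is split below: the low half is copied
	 * to EAX and the high half is shifted down into EDX before the
	 * write to MSR_GS_BASE.
	 */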
	movl	$MSR_GS_BASE, %ecx
	movl	%edx, %eax
	shrq	$32, %rdx
	wrmsr

	/* Set up and load the IDT */
	call	early_setup_idt

	/* Check if NX is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx, %edi

	/* Set up EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	/*
	 * Preserve the current value of EFER for comparison and to skip
	 * EFER writes if no change was made (for TDX guest)
	 */
	movl	%eax, %edx
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20, %edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX, early_pmd_flags(%rip)

	/* Avoid writing EFER if no change was made (for TDX guest) */
1:	cmpl	%edx, %eax
	je	1f
	xor	%edx, %edx
	wrmsr				/* Make changes effective */
1:
	/* Set up CR0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/* Pass the boot_params pointer as the first argument */
	movq	%r15, %rdi

.Ljump_to_C_code:
	xorl	%ebp, %ebp	# clear frame pointer
	ANNOTATE_RETPOLINE_SAFE
	callq	*initial_code(%rip)
	ud2
SYM_CODE_END(secondary_startup_64)

#include "verify_cpu.S"
#include "sev_verify_cbit.S"

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
/*
 * Entry point for soft restart of a CPU. Invoked from xxx_play_dead() for
 * restarting the boot CPU or for restarting SEV guest CPUs after CPU hot
 * unplug. Everything is set up already except the stack.
 */
SYM_CODE_START(soft_restart_cpu)
	ANNOTATE_NOENDBR
	UNWIND_HINT_END_OF_STACK

	/* Find the idle task stack */
	movq	PER_CPU_VAR(current_task), %rcx
	movq	TASK_threadsp(%rcx), %rsp

	jmp	.Ljump_to_C_code
SYM_CODE_END(soft_restart_cpu)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif

SYM_DATA(trampoline_lock, .quad 0);
	__FINITDATA

	__INIT
SYM_CODE_START(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		ENDBR
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
		ENDBR
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
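	/*
	 * Pad each per-vector stub with int3 (0xcc) up to the next multiple
	 * of EARLY_IDT_HANDLER_SIZE, so the stubs form a fixed-stride array
	 * indexed by vector number.
	 */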
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]

SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl	early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq	%rsi				/* pt_regs->si */
	movq	8(%rsp), %rsi			/* RSI = vector number */
	movq	%rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	%rax				/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	pushq	%rbx				/* pt_regs->bx */
	pushq	%rbp				/* pt_regs->bp */
	pushq	%r12				/* pt_regs->r12 */
	pushq	%r13				/* pt_regs->r13 */
	pushq	%r14				/* pt_regs->r14 */
	pushq	%r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

	movq	%rsp, %rdi	/* RDI = pt_regs; RSI is already trapnr */
	call	do_early_exception

	decl	early_recursion_flag(%rip)
	jmp	restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot. The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * XXX it does, fix this.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	call	do_vc_no_ghcb

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_no_ghcb)
SYM_PIC_ALIAS(vc_no_ghcb);
#endif

#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
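/*
 * 512 padding entries of 8 bytes each add up to 4k, i.e. the second
 * (user) half of the 8k PGD that PTI expects.
 */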
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)
SYM_PIC_ALIAS(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)
SYM_PIC_ALIAS(early_dynamic_pgts);

SYM_DATA(early_recursion_flag, .long 0)

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether the CPU
	 * supports it or whether it is enabled. But the CPU should
	 * ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
SYM_PIC_ALIAS(level4_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)
SYM_PIC_ALIAS(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)
SYM_PIC_ALIAS(level2_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)
SYM_PIC_ALIAS(level2_fixmap_pgt)

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

	.data
	.align 16

SYM_DATA(smpboot_control,	.long 0)

	.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
SYM_PIC_ALIAS(phys_base);
EXPORT_SYMBOL(phys_base)

#include "../xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)