/* Source: arch/x86/power/hibernate_asm_64.S (10818 views) */
/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2.
 *
 * Copyright 2007 Rafael J. Wysocki <[email protected]>
 * Copyright 2005 Andi Kleen <[email protected]>
 * Copyright 2004 Pavel Machek <[email protected]>
 *
 * swsusp_arch_resume must not use any stack or any nonlocal variables while
 * copying pages:
 *
 * Its rewriting one kernel image with another. What is stack in "old"
 * image could very well be data page in "new" image, and overwriting
 * your own stack under you is bad idea.
 */

	.text
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>

/*
 * Snapshot the CPU context into saved_context (offsets come from
 * asm-offsets), record where to jump back to (restore_registers) and the
 * current %cr3, then hand off to the C-level swsusp_save().
 */
ENTRY(swsusp_arch_suspend)
	movq	$saved_context, %rax
	movq	%rsp, pt_regs_sp(%rax)
	movq	%rbp, pt_regs_bp(%rax)
	movq	%rsi, pt_regs_si(%rax)
	movq	%rdi, pt_regs_di(%rax)
	movq	%rbx, pt_regs_bx(%rax)
	movq	%rcx, pt_regs_cx(%rax)
	movq	%rdx, pt_regs_dx(%rax)
	movq	%r8, pt_regs_r8(%rax)
	movq	%r9, pt_regs_r9(%rax)
	movq	%r10, pt_regs_r10(%rax)
	movq	%r11, pt_regs_r11(%rax)
	movq	%r12, pt_regs_r12(%rax)
	movq	%r13, pt_regs_r13(%rax)
	movq	%r14, pt_regs_r14(%rax)
	movq	%r15, pt_regs_r15(%rax)
	pushfq
	popq	pt_regs_flags(%rax)

	/* save the address of restore_registers */
	movq	$restore_registers, %rax
	movq	%rax, restore_jump_address(%rip)
	/* save cr3 */
	movq	%cr3, %rax
	movq	%rax, restore_cr3(%rip)

	call	swsusp_save
	ret

/*
 * Entry point for resume: switch to the temporary page tables, flush the
 * TLB, load the image kernel's jump address / %cr3 and the pbe list, then
 * jump to the relocated copy of core_restore_code.
 */
ENTRY(restore_image)
	/* switch to temporary page tables */
	movq	$__PAGE_OFFSET, %rdx
	movq	temp_level4_pgt(%rip), %rax
	subq	%rdx, %rax		# virt -> phys for %cr3
	movq	%rax, %cr3
	/* Flush TLB */
	movq	mmu_cr4_features(%rip), %rax
	movq	%rax, %rdx
	andq	$~(X86_CR4_PGE), %rdx
	movq	%rdx, %cr4;		# turn off PGE
	movq	%cr3, %rcx;		# flush TLB
	movq	%rcx, %cr3;
	movq	%rax, %cr4;		# turn PGE back on

	/* prepare to jump to the image kernel */
	movq	restore_jump_address(%rip), %rax
	movq	restore_cr3(%rip), %rbx

	/* prepare to copy image data to their original locations */
	movq	restore_pblist(%rip), %rdx
	movq	relocated_restore_code(%rip), %rcx
	jmpq	*%rcx

/* code below has been relocated to a safe page */
/*
 * Walk the pbe list in %rdx, copying each saved page back to its original
 * location; uses no stack (see header comment).  %rax = restore_registers,
 * %rbx = image kernel's %cr3 — both must be preserved across the loop.
 */
ENTRY(core_restore_code)
loop:
	testq	%rdx, %rdx
	jz	done

	/* get addresses from the pbe and copy the page */
	movq	pbe_address(%rdx), %rsi
	movq	pbe_orig_address(%rdx), %rdi
	movq	$(PAGE_SIZE >> 3), %rcx	# page size in quadwords
	rep
	movsq

	/* progress to the next pbe */
	movq	pbe_next(%rdx), %rdx
	jmp	loop
done:
	/* jump to the restore_registers address from the image header */
	jmpq	*%rax
	/*
	 * NOTE: This assumes that the boot kernel's text mapping covers the
	 * image kernel's page containing restore_registers and the address of
	 * this page is the same as in the image kernel's text mapping (it
	 * should always be true, because the text mapping is linear, starting
	 * from 0, and is supposed to cover the entire kernel text for every
	 * kernel).
	 *
	 * code below belongs to the image kernel
	 */

/*
 * Switch back to the image kernel's page tables, flush the TLB (including
 * global entries), restore the context saved by swsusp_arch_suspend, and
 * report success to the hibernation core via in_suspend.
 */
ENTRY(restore_registers)
	/* go back to the original page tables */
	movq	%rbx, %cr3

	/* Flush TLB, including "global" things (vmalloc) */
	movq	mmu_cr4_features(%rip), %rax
	movq	%rax, %rdx
	andq	$~(X86_CR4_PGE), %rdx
	movq	%rdx, %cr4;		# turn off PGE
	movq	%cr3, %rcx;		# flush TLB
	movq	%rcx, %cr3
	movq	%rax, %cr4;		# turn PGE back on

	/* We don't restore %rax, it must be 0 anyway */
	movq	$saved_context, %rax
	movq	pt_regs_sp(%rax), %rsp
	movq	pt_regs_bp(%rax), %rbp
	movq	pt_regs_si(%rax), %rsi
	movq	pt_regs_di(%rax), %rdi
	movq	pt_regs_bx(%rax), %rbx
	movq	pt_regs_cx(%rax), %rcx
	movq	pt_regs_dx(%rax), %rdx
	movq	pt_regs_r8(%rax), %r8
	movq	pt_regs_r9(%rax), %r9
	movq	pt_regs_r10(%rax), %r10
	movq	pt_regs_r11(%rax), %r11
	movq	pt_regs_r12(%rax), %r12
	movq	pt_regs_r13(%rax), %r13
	movq	pt_regs_r14(%rax), %r14
	movq	pt_regs_r15(%rax), %r15
	pushq	pt_regs_flags(%rax)
	popfq

	xorq	%rax, %rax		# success return value

	/* tell the hibernation core that we've just restored the memory */
	movq	%rax, in_suspend(%rip)

	ret