/* Path: arch/unicore32/kernel/hibernate.c */
/*
 * linux/arch/unicore32/kernel/hibernate.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Maintained by GUAN Xue-tao <[email protected]>
 * Copyright (C) 2001-2010 Guan Xuetao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/bootmem.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/suspend.h>

#include "mach/pm.h"

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;

/* Register save area for CPU 0, filled/consumed by the resume code paths.
 * NOTE(review): presumably accessed from the arch assembly that implements
 * restore_image() — confirm against the swsusp asm stub. */
struct swsusp_arch_regs swsusp_arch_regs_cpu0;

/*
 * Create a middle page table on a resume-safe page and put a pointer to it in
 * the given global directory entry.  This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 *
 * With the pud/pmd layers folded this cannot return NULL; the caller's
 * NULL check is purely defensive.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 *
 * Returns the (possibly pre-existing) page table mapped by *pmd, or NULL
 * if a resume-safe page could not be allocated.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		/* get_safe_page() guarantees the page does not collide with
		 * the image data being restored. */
		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
		if (!page_table)
			return NULL;

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_KERNEL_TABLE));

		/* Sanity: the entry we just installed must map back to the
		 * page we allocated. */
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.  The page tables are allocated out of resume-safe pages.
 *
 * Returns 0 on success, -ENOMEM if a page table could not be allocated.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

		/* All of lowmem already mapped; the remaining pgd slots are
		 * still walked (via `continue`) but populate nothing. */
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			pte_t *max_pte;

			if (pfn >= max_low_pfn)
				break;

			/* Map with normal page tables.
			 * NOTE: We can mark everything as executable here
			 */
			pte = resume_one_page_table_init(pmd);
			if (!pte)
				return -ENOMEM;

			max_pte = pte + PTRS_PER_PTE;
			for (; pte < max_pte; pte++, pfn++) {
				if (pfn >= max_low_pfn)
					break;

				set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
			}
		}
	}

	return 0;
}

/* Intentionally empty: no extra first-level setup is required here.
 * NOTE(review): presumably the pgd page returned by get_safe_page() is
 * usable as-is on this architecture — confirm against the UniCore MMU
 * initialisation code. */
static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
}

/*
 * Build the temporary identity-style kernel mapping on resume-safe pages,
 * then jump to the arch restore_image() to copy the hibernation image back.
 *
 * Returns -ENOMEM if the page tables cannot be allocated, an error from
 * resume_physical_mapping_init(), or 0 (only reachable if restore_image()
 * returns, i.e. after the image has been restored).
 */
int swsusp_arch_resume(void)
{
	int error;

	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);
	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

	/* We have got enough memory and from now on we cannot recover */
	restore_image(resume_pg_dir, restore_pblist);
	return 0;
}

/*
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 *
 *	Returns nonzero when @pfn lies within [__nosave_begin, __nosave_end),
 *	i.e. pages the hibernation core must not save/restore.  The end is
 *	rounded up to a page boundary before conversion to a pfn.
 */

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= begin_pfn) && (pfn < end_pfn);
}

/* Nothing to do in C here.
 * NOTE(review): CPU register state is presumably captured elsewhere
 * (swsusp_arch_regs_cpu0 / arch assembly) — confirm. */
void save_processor_state(void)
{
}

/* Invalidate all stale TLB entries after the image (and with it the real
 * page tables) has been restored. */
void restore_processor_state(void)
{
	local_flush_tlb_all();
}