/* Path: blob/master/arch/mn10300/include/asm/highmem.h (15126 views) */
/* MN10300 Virtual kernel memory mappings for high memory
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 * - Derived from include/asm-i386/highmem.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>

/* undef for production (#if HIGHMEM_DEBUG below then evaluates to 0) */
#undef HIGHMEM_DEBUG

/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;

extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

extern void __init kmap_init(void);

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily, subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
#define PKMAP_BASE	0xfe000000UL
#define LAST_PKMAP	1024
#define LAST_PKMAP_MASK	(LAST_PKMAP - 1)
#define PKMAP_NR(virt)	((virt - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))

extern unsigned long kmap_high(struct page *page);
extern void kunmap_high(struct page *page);

/*
 * Map a page into kernel virtual space.  Must not be called from
 * interrupt context (enforced with BUG()).  Lowmem pages already have a
 * permanent kernel mapping, so their address is returned directly;
 * only true highmem pages go through kmap_high().
 */
static inline unsigned long kmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (page < highmem_start_page)
		return page_address(page);
	return kmap_high(page);
}

/*
 * Release a mapping obtained with kmap().  Must not be called from
 * interrupt context.  Lowmem pages were never dynamically mapped, so
 * there is nothing to undo for them.
 */
static inline void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (page < highmem_start_page)
		return;
	kunmap_high(page);
}

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
static inline unsigned long __kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	/* Pagefaults stay disabled until the matching __kunmap_atomic();
	 * lowmem pages return early with faults still disabled. */
	pagefault_disable();
	if (page < highmem_start_page)
		return page_address(page);

	/* Pick a per-CPU fixmap slot for this nesting level.  Note the
	 * fixmap pte table is indexed downwards (kmap_pte - idx). */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#if HIGHMEM_DEBUG
	if (!pte_none(*(kmap_pte - idx)))
		BUG();			/* slot should have been empty */
#endif
	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
	local_flush_tlb_one(vaddr);

	return vaddr;
}

static inline void __kunmap_atomic(unsigned long vaddr)
{
	int type;

	/* Addresses below the fixmap region were lowmem mappings that
	 * __kmap_atomic() returned directly; nothing to tear down. */
	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();

#if HIGHMEM_DEBUG
	{
		unsigned int idx;
		idx = type + KM_TYPE_NR * smp_processor_id();

		if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
			BUG();

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remap it
		 */
		pte_clear(kmap_pte - idx);
		local_flush_tlb_one(vaddr);
	}
#endif

	/* In the non-debug build the pte is left in place and simply
	 * reused by the next __kmap_atomic() on this slot. */
	kmap_atomic_idx_pop();
	pagefault_enable();
}
#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */