Path: arch/unicore32/include/asm/mmu_context.h
/*1* linux/arch/unicore32/include/asm/mmu_context.h2*3* Code specific to PKUnity SoC and UniCore ISA4*5* Copyright (C) 2001-2010 GUAN Xue-tao6*7* This program is free software; you can redistribute it and/or modify8* it under the terms of the GNU General Public License version 2 as9* published by the Free Software Foundation.10*/11#ifndef __UNICORE_MMU_CONTEXT_H__12#define __UNICORE_MMU_CONTEXT_H__1314#include <linux/compiler.h>15#include <linux/sched.h>16#include <linux/io.h>1718#include <asm/cacheflush.h>19#include <asm/cpu-single.h>2021#define init_new_context(tsk, mm) 02223#define destroy_context(mm) do { } while (0)2425/*26* This is called when "tsk" is about to enter lazy TLB mode.27*28* mm: describes the currently active mm context29* tsk: task which is entering lazy tlb30* cpu: cpu number which is entering lazy tlb31*32* tsk->mm will be NULL33*/34static inline void35enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)36{37}3839/*40* This is the actual mm switch as far as the scheduler41* is concerned. No registers are touched. We avoid42* calling the CPU specific function when the mm hasn't43* actually changed.44*/45static inline void46switch_mm(struct mm_struct *prev, struct mm_struct *next,47struct task_struct *tsk)48{49unsigned int cpu = smp_processor_id();5051if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)52cpu_switch_mm(next->pgd, next);53}5455#define deactivate_mm(tsk, mm) do { } while (0)56#define activate_mm(prev, next) switch_mm(prev, next, NULL)5758/*59* We are inserting a "fake" vma for the user-accessible vector page so60* gdb and friends can get to it through ptrace and /proc/<pid>/mem.61* But we also want to remove it before the generic code gets to see it62* during process exit or the unmapping of it would cause total havoc.63* (the macro is used as remove_vma() is static to mm/mmap.c)64*/65#define arch_exit_mmap(mm) \66do { \67struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \68if (high_vma) { \69BUG_ON(high_vma->vm_next); /* it should be last */ \70if (high_vma->vm_prev) \71high_vma->vm_prev->vm_next = NULL; \72else \73mm->mmap = NULL; \74rb_erase(&high_vma->vm_rb, &mm->mm_rb); \75mm->mmap_cache = NULL; \76mm->map_count--; \77remove_vma(high_vma); \78} \79} while (0)8081static inline void arch_dup_mmap(struct mm_struct *oldmm,82struct mm_struct *mm)83{84}8586#endif878889
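For reference, below is a small userspace sketch of the decision switch_mm() makes in this header. It is not kernel code: mock_mm, MOCK_NR_CPUS and the mock_* helpers are invented stand-ins for mm_struct, mm_cpumask() and cpu_switch_mm(); only the "test-and-set the CPU bit, or the mm actually changed" condition mirrors the real function.

/*
 * Simplified userspace model of the switch_mm() guard above.
 * All mock_* names and MOCK_NR_CPUS are hypothetical stand-ins
 * used only to illustrate when the expensive page-table reload
 * would be skipped.
 */
#include <stdio.h>
#include <stdbool.h>

#define MOCK_NR_CPUS 4

struct mock_mm {
	bool cpumask[MOCK_NR_CPUS];	/* stands in for mm_cpumask(mm) */
};

/* Return the old bit value and set it, like cpumask_test_and_set_cpu(). */
static bool mock_test_and_set_cpu(int cpu, struct mock_mm *mm)
{
	bool old = mm->cpumask[cpu];

	mm->cpumask[cpu] = true;
	return old;
}

/* Stands in for cpu_switch_mm(): the CPU-specific, expensive part. */
static void mock_cpu_switch_mm(struct mock_mm *next)
{
	printf("  -> page tables reloaded for mm %p\n", (void *)next);
}

static void mock_switch_mm(struct mock_mm *prev, struct mock_mm *next, int cpu)
{
	/*
	 * Same condition as the header: switch if this CPU has not run
	 * 'next' before, or if the mm really changed.
	 */
	if (!mock_test_and_set_cpu(cpu, next) || prev != next)
		mock_cpu_switch_mm(next);
	else
		printf("  -> skipped, mm unchanged on cpu %d\n", cpu);
}

int main(void)
{
	struct mock_mm a = { { false } }, b = { { false } };

	printf("switch a -> b on cpu 0:\n");
	mock_switch_mm(&a, &b, 0);	/* mm changed: reload */

	printf("switch b -> b on cpu 0:\n");
	mock_switch_mm(&b, &b, 0);	/* same mm, cpu 0 already in mask: skip */

	printf("switch b -> b on cpu 1:\n");
	mock_switch_mm(&b, &b, 1);	/* same mm, first time on cpu 1: reload */

	return 0;
}

Built with any C compiler, the second call skips the reload because CPU 0 has already run that mm, while the third still reloads because CPU 1 sees the mm for the first time, which are the two cases the real guard distinguishes.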