#ifndef __MM_VMA_INTERNAL_H
#define __MM_VMA_INTERNAL_H
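/*
 * Minimal userland stand-ins for kernel definitions so the VMA logic can be
 * compiled and exercised as an ordinary userspace program. Most stubs either
 * no-op or emulate just enough behaviour for the tests to observe.
 */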
#define __private
#define __bitwise
#define __randomize_layout
#define CONFIG_MMU
#define CONFIG_PER_VMA_LOCK
#include <stdlib.h>
#include <string.h>
#include <linux/list.h>
#include <linux/maple_tree.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
extern unsigned long stack_guard_gap;
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
#else
#define mmap_min_addr 0UL
#define dac_mmap_min_addr 0UL
#endif
#define VM_WARN_ON(_expr) (WARN_ON(_expr))
#define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
#define VM_WARN_ON_VMG(_expr, _vmg) (WARN_ON(_expr))
#define VM_BUG_ON(_expr) (BUG_ON(_expr))
#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))
#define MMF_HAS_MDWE 28
#define VM_NONE 0x00000000
#define VM_READ 0x00000001
#define VM_WRITE 0x00000002
#define VM_EXEC 0x00000004
#define VM_SHARED 0x00000008
#define VM_MAYREAD 0x00000010
#define VM_MAYWRITE 0x00000020
#define VM_MAYEXEC 0x00000040
#define VM_GROWSDOWN 0x00000100
#define VM_PFNMAP 0x00000400
#define VM_LOCKED 0x00002000
#define VM_IO 0x00004000
#define VM_SEQ_READ 0x00008000
#define VM_RAND_READ 0x00010000
#define VM_DONTEXPAND 0x00040000
#define VM_LOCKONFAULT 0x00080000
#define VM_ACCOUNT 0x00100000
#define VM_NORESERVE 0x00200000
#define VM_MIXEDMAP 0x10000000
#define VM_SHADOW_STACK VM_NONE
#define VM_SOFTDIRTY 0
#define VM_ARCH_1 0x01000000
#define VM_GROWSUP VM_NONE
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK VM_GROWSUP
#define VM_STACK_EARLY VM_GROWSDOWN
#else
#define VM_STACK VM_GROWSDOWN
#define VM_STACK_EARLY 0
#endif
#define DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE)
#define TASK_SIZE_LOW DEFAULT_MAP_WINDOW
#define TASK_SIZE_MAX DEFAULT_MAP_WINDOW
#define STACK_TOP TASK_SIZE_LOW
#define STACK_TOP_MAX TASK_SIZE_MAX
#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)
#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
#define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
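/* Bits set in the VMA until the stack is in its final location. */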
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
#define RLIMIT_STACK 3
#define RLIMIT_MEMLOCK 8
#define CAP_IPC_LOCK 14
#ifdef CONFIG_64BIT
#define VM_SEALED_BIT 42
#define VM_SEALED BIT(VM_SEALED_BIT)
#else
#define VM_SEALED VM_NONE
#endif
#define FIRST_USER_ADDRESS 0UL
#define USER_PGTABLES_CEILING 0UL
#define vma_policy(vma) NULL
#define down_write_nest_lock(sem, nest_lock)
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
#define for_each_vma(__vmi, __vma) \
while (((__vma) = vma_next(&(__vmi))) != NULL)
#define for_each_vma_range(__vmi, __vma, __end) \
while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))
#define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
#define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)
#define TASK_SIZE ((1UL << 47) - PAGE_SIZE)
#define AS_MM_ALL_LOCKS 2
#define sysctl_max_map_count 0x1000000UL
#define pgoff_t unsigned long
typedef unsigned long pgprotval_t;
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef unsigned long vm_flags_t;
typedef __bitwise unsigned int vm_fault_t;
#define pr_warn_once pr_err
#define data_race(expr) expr
#define ASSERT_EXCLUSIVE_WRITER(x)
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
struct kref {
refcount_t refcount;
};
enum {
TASK_COMM_LEN = 16,
};
enum {
READ_IMPLIES_EXEC = 0x0400000,
};
struct task_struct {
char comm[TASK_COMM_LEN];
pid_t pid;
struct mm_struct *mm;
unsigned int personality;
};
struct task_struct *get_current(void);
#define current get_current()
struct anon_vma {
struct anon_vma *root;
struct rb_root_cached rb_root;
bool was_cloned;
bool was_unlinked;
};
struct anon_vma_chain {
struct anon_vma *anon_vma;
struct list_head same_vma;
};
struct anon_vma_name {
struct kref kref;
char name[];
};
struct vma_iterator {
struct ma_state mas;
};
#define VMA_ITERATOR(name, __mm, __addr) \
struct vma_iterator name = { \
.mas = { \
.tree = &(__mm)->mm_mt, \
.index = __addr, \
.node = NULL, \
.status = ma_start, \
}, \
}
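/*
 * Illustrative use, mirroring kernel callers (do_something() is a
 * hypothetical placeholder; assumes mm->mm_mt has been initialised):
 *
 *	VMA_ITERATOR(vmi, mm, 0);
 *	struct vm_area_struct *vma;
 *
 *	for_each_vma(vmi, vma)
 *		do_something(vma);
 *
 * for_each_vma_range() is the same but stops before a given end address.
 */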
struct address_space {
struct rb_root_cached i_mmap;
unsigned long flags;
atomic_t i_mmap_writable;
};
struct vm_userfaultfd_ctx {};
struct mempolicy {};
struct mmu_gather {};
struct mutex {};
#define DEFINE_MUTEX(mutexname) \
struct mutex mutexname = {}
struct mm_struct {
struct maple_tree mm_mt;
int map_count;
unsigned long total_vm;
unsigned long locked_vm;
unsigned long data_vm;
unsigned long exec_vm;
unsigned long stack_vm;
unsigned long def_flags;
unsigned long flags;
};
struct vm_area_struct;
struct vm_area_desc {
struct mm_struct *mm;
unsigned long start;
unsigned long end;
pgoff_t pgoff;
struct file *file;
vm_flags_t vm_flags;
pgprot_t page_prot;
const struct vm_operations_struct *vm_ops;
void *private_data;
};
struct file_operations {
int (*mmap)(struct file *, struct vm_area_struct *);
int (*mmap_prepare)(struct vm_area_desc *);
};
struct file {
struct address_space *f_mapping;
const struct file_operations *f_op;
};
#define VMA_LOCK_OFFSET 0x40000000
typedef struct { unsigned long v; } freeptr_t;
struct vm_area_struct {
union {
struct {
unsigned long vm_start;
unsigned long vm_end;
};
freeptr_t vm_freeptr;
};
struct mm_struct *vm_mm;
pgprot_t vm_page_prot;
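	/*
	 * Flags as in mm.h. Modify only via the vm_flags_*() helpers below;
	 * the const alias makes direct writes a compile error.
	 */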
union {
const vm_flags_t vm_flags;
vm_flags_t __private __vm_flags;
};
#ifdef CONFIG_PER_VMA_LOCK
unsigned int vm_lock_seq;
#endif
struct list_head anon_vma_chain;
struct anon_vma *anon_vma;
const struct vm_operations_struct *vm_ops;
unsigned long vm_pgoff;
struct file * vm_file;
void * vm_private_data;
#ifdef CONFIG_SWAP
atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
struct vm_region *vm_region;
#endif
#ifdef CONFIG_NUMA
struct mempolicy *vm_policy;
#endif
#ifdef CONFIG_NUMA_BALANCING
struct vma_numab_state *numab_state;
#endif
#ifdef CONFIG_PER_VMA_LOCK
refcount_t vm_refcnt;
#endif
struct {
struct rb_node rb;
unsigned long rb_subtree_last;
} shared;
#ifdef CONFIG_ANON_VMA_NAME
struct anon_vma_name *anon_name;
#endif
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
struct vm_fault {};
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
int (*may_split)(struct vm_area_struct *area, unsigned long addr);
int (*mremap)(struct vm_area_struct *area);
int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
unsigned long end, unsigned long newflags);
vm_fault_t (*fault)(struct vm_fault *vmf);
vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
vm_fault_t (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
unsigned long (*pagesize)(struct vm_area_struct * area);
vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
int (*access)(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
const char *(*name)(struct vm_area_struct *vma);
#ifdef CONFIG_NUMA
int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
unsigned long addr, pgoff_t *ilx);
#endif
struct page *(*find_special_page)(struct vm_area_struct *vma,
unsigned long addr);
};
struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
unsigned long flags;
unsigned long length;
unsigned long low_limit;
unsigned long high_limit;
unsigned long align_mask;
unsigned long align_offset;
unsigned long start_gap;
};
struct pagetable_move_control {
struct vm_area_struct *old;
struct vm_area_struct *new;
unsigned long old_addr;
unsigned long old_end;
unsigned long new_addr;
unsigned long len_in;
bool need_rmap_locks;
bool for_stack;
};
#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_) \
struct pagetable_move_control name = { \
.old = old_, \
.new = new_, \
.old_addr = old_addr_, \
.old_end = (old_addr_) + (len_), \
.new_addr = new_addr_, \
.len_in = len_, \
}
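/*
 * Illustrative use (names are hypothetical): describe a page-table move when
 * relocating a VMA, as mremap() would, then hand it to move_page_tables():
 *
 *	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_addr, new_addr, len);
 *	moved = move_page_tables(&pmc);
 */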
struct kmem_cache_args {
unsigned int align;
unsigned int useroffset;
unsigned int usersize;
unsigned int freeptr_offset;
bool use_freeptr_offset;
void (*ctor)(void *);
};
static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
mas_pause(&vmi->mas);
}
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
}
static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
return __pgprot(vm_flags);
}
static inline bool is_shared_maywrite(vm_flags_t vm_flags)
{
return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
(VM_SHARED | VM_MAYWRITE);
}
static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
{
return is_shared_maywrite(vma->vm_flags);
}
static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
return mas_find(&vmi->mas, ULONG_MAX);
}
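/*
 * Simplified per-VMA lock refcounting: a VMA counts as "attached" once it is
 * visible in the maple tree (vm_refcnt >= 1) and "detached" at zero. The
 * assertions below let tests catch attach/detach ordering mistakes.
 */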
static inline void vma_assert_attached(struct vm_area_struct *vma)
{
WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
}
static inline void vma_assert_detached(struct vm_area_struct *vma)
{
WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
}
static inline void vma_assert_write_locked(struct vm_area_struct *);
static inline void vma_mark_attached(struct vm_area_struct *vma)
{
vma_assert_write_locked(vma);
vma_assert_detached(vma);
refcount_set_release(&vma->vm_refcnt, 1);
}
static inline void vma_mark_detached(struct vm_area_struct *vma)
{
vma_assert_write_locked(vma);
vma_assert_attached(vma);
	if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
		/*
		 * A reader may have temporarily raised vm_refcnt, but it will
		 * drop it without using the VMA since it is write-locked.
		 */
	}
}
extern const struct vm_operations_struct vma_dummy_vm_ops;
extern unsigned long rlimit(unsigned int limit);
static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
memset(vma, 0, sizeof(*vma));
vma->vm_mm = mm;
vma->vm_ops = &vma_dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_lock_seq = UINT_MAX;
}
struct kmem_cache {
const char *name;
size_t object_size;
struct kmem_cache_args *args;
};
static inline struct kmem_cache *__kmem_cache_create(const char *name,
size_t object_size,
struct kmem_cache_args *args)
{
struct kmem_cache *ret = malloc(sizeof(struct kmem_cache));
ret->name = name;
ret->object_size = object_size;
ret->args = args;
return ret;
}
#define kmem_cache_create(__name, __object_size, __args, ...) \
__kmem_cache_create((__name), (__object_size), (__args))
static inline void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
(void)gfpflags;
return calloc(s->object_size, 1);
}
static inline void kmem_cache_free(struct kmem_cache *s, void *x)
{
free(x);
}
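/*
 * Illustrative use: the malloc()-backed stubs above let tests exercise the
 * slab API without a real allocator (the gfp argument is ignored, so 0 works):
 *
 *	struct kmem_cache *cache =
 *		kmem_cache_create("vma", sizeof(struct vm_area_struct), NULL);
 *	struct vm_area_struct *vma = kmem_cache_alloc(cache, 0);
 *	kmem_cache_free(cache, vma);
 */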
#define is_exec_mapping(flags) \
	(((flags) & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)
#define is_stack_mapping(flags) \
	((((flags) & VM_STACK) == VM_STACK) || ((flags) & VM_SHADOW_STACK))
#define is_data_mapping(flags) \
	(((flags) & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)
static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
long npages)
{
WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
if (is_exec_mapping(flags))
mm->exec_vm += npages;
else if (is_stack_mapping(flags))
mm->stack_vm += npages;
else if (is_data_mapping(flags))
mm->data_vm += npages;
}
#undef is_exec_mapping
#undef is_stack_mapping
#undef is_data_mapping
static inline void vm_acct_memory(long pages);
static inline void vm_unacct_memory(long pages)
{
vm_acct_memory(-pages);
}
static inline void mapping_allow_writable(struct address_space *mapping)
{
atomic_inc(&mapping->i_mmap_writable);
}
static inline void vma_set_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
pgoff_t pgoff)
{
vma->vm_start = start;
vma->vm_end = end;
vma->vm_pgoff = pgoff;
}
static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
return mas_find(&vmi->mas, max - 1);
}
static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
unsigned long start, unsigned long end, gfp_t gfp)
{
__mas_set_range(&vmi->mas, start, end - 1);
mas_store_gfp(&vmi->mas, NULL, gfp);
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
return 0;
}
static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
unsigned long start_addr,
unsigned long end_addr)
{
unsigned long index = start_addr;
mmap_assert_locked(mm);
return mt_find(&mm->mm_mt, &index, end_addr - 1);
}
static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
return mtree_load(&mm->mm_mt, addr);
}
static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
return mas_prev(&vmi->mas, 0);
}
static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
mas_set(&vmi->mas, addr);
}
static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
return !vma->vm_ops;
}
#define vma_iter_load(vmi) \
mas_walk(&(vmi)->mas)
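/*
 * Look up the first VMA which satisfies addr < vm_end, and set *pprev to the
 * previous VMA (or NULL if there is none), mirroring the kernel API.
 */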
static inline struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
struct vm_area_struct **pprev)
{
struct vm_area_struct *vma;
VMA_ITERATOR(vmi, mm, addr);
vma = vma_iter_load(&vmi);
*pprev = vma_prev(&vmi);
if (!vma)
vma = vma_next(&vmi);
return vma;
}
#undef vma_iter_load
static inline void vma_iter_init(struct vma_iterator *vmi,
struct mm_struct *mm, unsigned long addr)
{
mas_init(&vmi->mas, &mm->mm_mt, addr);
}
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
return NULL;
}
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
struct vm_userfaultfd_ctx vm_ctx)
{
return true;
}
static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
struct anon_vma_name *anon_name2)
{
return true;
}
static inline void might_sleep(void)
{
}
static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
static inline void fput(struct file *)
{
}
static inline void mpol_put(struct mempolicy *)
{
}
static inline void lru_add_drain(void)
{
}
static inline void tlb_gather_mmu(struct mmu_gather *, struct mm_struct *)
{
}
static inline void update_hiwater_rss(struct mm_struct *)
{
}
static inline void update_hiwater_vm(struct mm_struct *)
{
}
static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
struct vm_area_struct *vma, unsigned long start_addr,
unsigned long end_addr, unsigned long tree_end,
bool mm_wr_locked)
{
(void)tlb;
(void)mas;
(void)vma;
(void)start_addr;
(void)end_addr;
(void)tree_end;
(void)mm_wr_locked;
}
static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
struct vm_area_struct *vma, unsigned long floor,
unsigned long ceiling, bool mm_wr_locked)
{
(void)tlb;
(void)mas;
(void)vma;
(void)floor;
(void)ceiling;
(void)mm_wr_locked;
}
static inline void mapping_unmap_writable(struct address_space *)
{
}
static inline void flush_dcache_mmap_lock(struct address_space *)
{
}
static inline void tlb_finish_mmu(struct mmu_gather *)
{
}
static inline struct file *get_file(struct file *f)
{
return f;
}
static inline int vma_dup_policy(struct vm_area_struct *, struct vm_area_struct *)
{
return 0;
}
static inline int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	/* For testing purposes, indicate that the anon_vma was cloned. */
	if (src->anon_vma != NULL) {
dst->anon_vma = src->anon_vma;
dst->anon_vma->was_cloned = true;
}
return 0;
}
static inline void vma_start_write(struct vm_area_struct *vma)
{
	/* Used to indicate to tests that a write operation has begun. */
	vma->vm_lock_seq++;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
struct vm_area_struct *next)
{
(void)vma;
(void)start;
(void)end;
(void)next;
}
static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}
static inline void vma_iter_free(struct vma_iterator *vmi)
{
mas_destroy(&vmi->mas);
}
static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
return mas_next_range(&vmi->mas, ULONG_MAX);
}
static inline void vm_acct_memory(long pages)
{
}
static inline void vma_interval_tree_insert(struct vm_area_struct *,
struct rb_root_cached *)
{
}
static inline void vma_interval_tree_remove(struct vm_area_struct *,
struct rb_root_cached *)
{
}
static inline void flush_dcache_mmap_unlock(struct address_space *)
{
}
static inline void anon_vma_interval_tree_insert(struct anon_vma_chain*,
struct rb_root_cached *)
{
}
static inline void anon_vma_interval_tree_remove(struct anon_vma_chain*,
struct rb_root_cached *)
{
}
static inline void uprobe_mmap(struct vm_area_struct *)
{
}
static inline void uprobe_munmap(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
(void)vma;
(void)start;
(void)end;
}
static inline void i_mmap_lock_write(struct address_space *)
{
}
static inline void anon_vma_lock_write(struct anon_vma *)
{
}
static inline void vma_assert_write_locked(struct vm_area_struct *)
{
}
static inline void unlink_anon_vmas(struct vm_area_struct *vma)
{
	/* For testing purposes, indicate that the anon_vma was unlinked. */
	if (vma->anon_vma)
		vma->anon_vma->was_unlinked = true;
}
static inline void anon_vma_unlock_write(struct anon_vma *)
{
}
static inline void i_mmap_unlock_write(struct address_space *)
{
}
static inline void anon_vma_merge(struct vm_area_struct *,
struct vm_area_struct *)
{
}
static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
struct list_head *unmaps)
{
(void)vma;
(void)start;
(void)end;
(void)unmaps;
return 0;
}
static inline void mmap_write_downgrade(struct mm_struct *)
{
}
static inline void mmap_read_unlock(struct mm_struct *)
{
}
static inline void mmap_write_unlock(struct mm_struct *)
{
}
static inline int mmap_write_lock_killable(struct mm_struct *)
{
return 0;
}
static inline bool can_modify_mm(struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
(void)mm;
(void)start;
(void)end;
return true;
}
static inline void arch_unmap(struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
(void)mm;
(void)start;
(void)end;
}
static inline void mmap_assert_locked(struct mm_struct *)
{
}
static inline bool mpol_equal(struct mempolicy *, struct mempolicy *)
{
return true;
}
static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
vm_flags_t vm_flags)
{
(void)vma;
(void)vm_flags;
}
static inline bool mapping_can_writeback(struct address_space *)
{
return true;
}
static inline bool is_vm_hugetlb_page(struct vm_area_struct *)
{
return false;
}
static inline bool vma_soft_dirty_enabled(struct vm_area_struct *)
{
return false;
}
static inline bool userfaultfd_wp(struct vm_area_struct *)
{
return false;
}
static inline void mmap_assert_write_locked(struct mm_struct *)
{
}
static inline void mutex_lock(struct mutex *)
{
}
static inline void mutex_unlock(struct mutex *)
{
}
static inline bool mutex_is_locked(struct mutex *)
{
return true;
}
static inline bool signal_pending(void *)
{
return false;
}
static inline bool is_file_hugepages(struct file *)
{
return false;
}
static inline int security_vm_enough_memory_mm(struct mm_struct *, long)
{
return 0;
}
static inline bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long)
{
return true;
}
static inline void vm_flags_init(struct vm_area_struct *vma,
vm_flags_t flags)
{
vma->__vm_flags = flags;
}
static inline void vm_flags_set(struct vm_area_struct *vma,
vm_flags_t flags)
{
vma_start_write(vma);
vma->__vm_flags |= flags;
}
static inline void vm_flags_clear(struct vm_area_struct *vma,
vm_flags_t flags)
{
vma_start_write(vma);
vma->__vm_flags &= ~flags;
}
static inline int shmem_zero_setup(struct vm_area_struct *)
{
return 0;
}
static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
vma->vm_ops = NULL;
}
static inline void ksm_add_vma(struct vm_area_struct *)
{
}
static inline void perf_event_mmap(struct vm_area_struct *)
{
}
static inline bool vma_is_dax(struct vm_area_struct *)
{
return false;
}
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *)
{
return NULL;
}
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
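/*
 * Update vma->vm_page_prot from vm_flags, mirroring the kernel's behaviour:
 * shared mappings that want write notification are given a protection
 * computed without VM_SHARED, so the first write faults and can be tracked.
 */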
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
vm_flags_t vm_flags = vma->vm_flags;
pgprot_t vm_page_prot;
vm_page_prot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(vm_flags));
if (vma_wants_writenotify(vma, vm_page_prot)) {
vm_flags &= ~VM_SHARED;
vm_page_prot = pgprot_modify(vm_page_prot, vm_get_page_prot(vm_flags));
}
WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}
static inline bool arch_validate_flags(vm_flags_t)
{
return true;
}
static inline void vma_close(struct vm_area_struct *)
{
}
static inline int mmap_file(struct file *, struct vm_area_struct *)
{
return 0;
}
static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_GROWSDOWN)
return stack_guard_gap;
if (vma->vm_flags & VM_SHADOW_STACK)
return PAGE_SIZE;
return 0;
}
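/*
 * vm_start_gap()/vm_end_gap() report the VMA bounds extended by the stack
 * guard gap, clamping on address-space under/overflow. For example, with the
 * kernel's default 256-page gap, a grows-down stack has an effective start of
 * vm_start - 256 * PAGE_SIZE. Here stack_guard_gap is set by the test harness.
 */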
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
unsigned long gap = stack_guard_start_gap(vma);
unsigned long vm_start = vma->vm_start;
vm_start -= gap;
if (vm_start > vma->vm_start)
vm_start = 0;
return vm_start;
}
static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
unsigned long vm_end = vma->vm_end;
if (vma->vm_flags & VM_GROWSUP) {
vm_end += stack_guard_gap;
if (vm_end < vma->vm_end)
vm_end = -PAGE_SIZE;
}
return vm_end;
}
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr, unsigned long len)
{
return 0;
}
static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
return vma->vm_flags & VM_ACCESS_FLAGS;
}
static inline bool capable(int cap)
{
return true;
}
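/*
 * Check a prospective mlock against RLIMIT_MEMLOCK. For example, with a
 * 64 MiB limit and 4 KiB pages, limit_pages is 16384, and the request passes
 * if existing locked_vm plus the new pages stays within that. Note the stub
 * capable() above always returns true, so this always succeeds here.
 */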
static inline bool mlock_future_ok(struct mm_struct *mm, vm_flags_t vm_flags,
unsigned long bytes)
{
unsigned long locked_pages, limit_pages;
if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
return true;
locked_pages = bytes >> PAGE_SHIFT;
locked_pages += mm->locked_vm;
limit_pages = rlimit(RLIMIT_MEMLOCK);
limit_pages >>= PAGE_SHIFT;
return locked_pages <= limit_pages;
}
static inline int __anon_vma_prepare(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = calloc(1, sizeof(struct anon_vma));
if (!anon_vma)
return -ENOMEM;
anon_vma->root = anon_vma;
vma->anon_vma = anon_vma;
return 0;
}
static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
if (likely(vma->anon_vma))
return 0;
return __anon_vma_prepare(vma);
}
static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
struct list_head *uf)
{
}
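/*
 * Memory-Deny-Write-Execute (MDWE): when MMF_HAS_MDWE is set, refuse
 * transitions that would make a mapping both writable and executable, or
 * grant execute permission to a previously non-executable mapping.
 */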
static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
{
	if (!test_bit(MMF_HAS_MDWE, &current->mm->flags))
return false;
if (!(new & VM_EXEC))
return false;
if (new & VM_WRITE)
return true;
if (!(old & VM_EXEC))
return true;
return false;
}
static inline int mapping_map_writable(struct address_space *mapping)
{
	int c = atomic_read(&mapping->i_mmap_writable);

	/*
	 * Derived from the raw_atomic_inc_unless_negative() implementation;
	 * the tests are single-threaded, so the CAS never has to retry.
	 */
	do {
if (c < 0)
return -EPERM;
} while (!__sync_bool_compare_and_swap(&mapping->i_mmap_writable, c, c+1));
return 0;
}
static inline unsigned long move_page_tables(struct pagetable_move_control *pmc)
{
(void)pmc;
return 0;
}
static inline void free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
(void)tlb;
(void)addr;
(void)end;
(void)floor;
(void)ceiling;
}
static inline int ksm_execve(struct mm_struct *mm)
{
(void)mm;
return 0;
}
static inline void ksm_exit(struct mm_struct *mm)
{
(void)mm;
}
static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
{
(void)vma;
(void)reset_refcnt;
}
static inline void vma_numab_state_init(struct vm_area_struct *vma)
{
(void)vma;
}
static inline void vma_numab_state_free(struct vm_area_struct *vma)
{
(void)vma;
}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
struct vm_area_struct *new_vma)
{
(void)orig_vma;
(void)new_vma;
}
static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
(void)vma;
}
static inline void set_vma_from_desc(struct vm_area_struct *vma,
struct vm_area_desc *desc);
static inline struct vm_area_desc *vma_to_desc(struct vm_area_struct *vma,
struct vm_area_desc *desc);
static inline int compat_vma_mmap_prepare(struct file *file,
struct vm_area_struct *vma)
{
struct vm_area_desc desc;
int err;
err = file->f_op->mmap_prepare(vma_to_desc(vma, &desc));
if (err)
return err;
set_vma_from_desc(vma, &desc);
return 0;
}
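/*
 * A file may supply ->mmap() or ->mmap_prepare(), but never both; mapping is
 * only possible when exactly one of the two hooks is provided.
 */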
static inline bool can_mmap_file(struct file *file)
{
bool has_mmap = file->f_op->mmap;
bool has_mmap_prepare = file->f_op->mmap_prepare;
if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
return false;
if (!has_mmap && !has_mmap_prepare)
return false;
return true;
}
static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
{
if (file->f_op->mmap_prepare)
return compat_vma_mmap_prepare(file, vma);
return file->f_op->mmap(file, vma);
}
static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
{
return file->f_op->mmap_prepare(desc);
}
static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
(void)vma;
}
static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
get_file(file);
swap(vma->vm_file, file);
fput(file);
}
static inline bool shmem_file(struct file *)
{
return false;
}
static inline vm_flags_t ksm_vma_flags(const struct mm_struct *, const struct file *,
vm_flags_t vm_flags)
{
return vm_flags;
}
#endif	/* __MM_VMA_INTERNAL_H */