#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include "generated/bit-length.h"
#include "maple-shared.h"
#include "vma_internal.h"
#include "../../../mm/vma.h"
static bool fail_prealloc;
#define vma_iter_prealloc(vmi, vma) \
(fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
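/* Tunables normally defined elsewhere in the kernel, referenced by the mm code included below. */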
#define CONFIG_DEFAULT_MMAP_MIN_ADDR 65536
unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
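/* Compile the in-tree VMA implementation directly into this userland test program. */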
#include "../../../mm/vma_init.c"
#include "../../../mm/vma_exec.c"
#include "../../../mm/vma.c"
const struct vm_operations_struct vma_dummy_vm_ops;
static struct anon_vma dummy_anon_vma;
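/*
 * Each test returns bool. These helpers print a diagnostic identifying the
 * failed expression and return false from the enclosing test on failure.
 */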
#define ASSERT_TRUE(_expr) \
do { \
if (!(_expr)) { \
fprintf(stderr, \
"Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
__FILE__, __LINE__, __FUNCTION__, #_expr); \
return false; \
} \
} while (0)
#define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
#define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
#define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
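/*
 * Minimal stubs required by the included mm code: get_current() returns a
 * static task_struct and rlimit() reports an effectively unlimited value.
 */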
static struct task_struct __current;
struct task_struct *get_current(void)
{
return &__current;
}
unsigned long rlimit(unsigned int limit)
{
return (unsigned long)-1;
}
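/* Allocate a detached VMA spanning [start, end) with the given pgoff and flags. */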
static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
unsigned long start,
unsigned long end,
pgoff_t pgoff,
vm_flags_t vm_flags)
{
struct vm_area_struct *ret = vm_area_alloc(mm);
if (ret == NULL)
return NULL;
ret->vm_start = start;
ret->vm_end = end;
ret->vm_pgoff = pgoff;
ret->__vm_flags = vm_flags;
vma_assert_detached(ret);
return ret;
}
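/* Link a VMA into the mm's tree and, on success, assert that it is attached. */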
static int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
int res;
res = vma_link(mm, vma);
if (!res)
vma_assert_attached(vma);
return res;
}
static void detach_free_vma(struct vm_area_struct *vma)
{
vma_mark_detached(vma);
vm_area_free(vma);
}
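/*
 * Allocate and link a VMA in one step. vm_lock_seq is reset afterwards so that
 * vma_write_started() does not report the write performed by linking itself.
 */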
static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
unsigned long start,
unsigned long end,
pgoff_t pgoff,
vm_flags_t vm_flags)
{
struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, vm_flags);
if (vma == NULL)
return NULL;
if (attach_vma(mm, vma)) {
detach_free_vma(vma);
return NULL;
}
vma->vm_lock_seq = UINT_MAX;
return vma;
}
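/*
 * Wrapper around vma_merge_new_range(): look up the prev/next VMAs from the
 * iterator, as a new-range merge requires, and assert the result is attached.
 */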
static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
{
struct vm_area_struct *vma;
vmg->next = vma_next(vmg->vmi);
vmg->prev = vma_prev(vmg->vmi);
vma_iter_next_range(vmg->vmi);
vma = vma_merge_new_range(vmg);
if (vma)
vma_assert_attached(vma);
return vma;
}
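/* Wrapper around vma_merge_existing_range() which asserts the result is attached. */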
static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
{
struct vm_area_struct *vma;
vma = vma_merge_existing_range(vmg);
if (vma)
vma_assert_attached(vma);
return vma;
}
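/* Expand an existing VMA as described by vmg, returning 0 on success. */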
static int expand_existing(struct vma_merge_struct *vmg)
{
return vma_expand(vmg);
}
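/*
 * Reset the merge descriptor to target [start, end) with the given pgoff and
 * flags, clearing state left over from a previous merge attempt and
 * positioning the iterator at start.
 */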
static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags)
{
vma_iter_set(vmg->vmi, start);
vmg->prev = NULL;
vmg->middle = NULL;
vmg->next = NULL;
vmg->target = NULL;
vmg->start = start;
vmg->end = end;
vmg->pgoff = pgoff;
vmg->vm_flags = vm_flags;
vmg->just_expand = false;
vmg->__remove_middle = false;
vmg->__remove_next = false;
vmg->__adjust_middle_start = false;
vmg->__adjust_next_start = false;
}
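/* As vmg_set_range(), additionally setting the anon_vma to merge against. */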
static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long start,
unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags,
struct anon_vma *anon_vma)
{
vmg_set_range(vmg, start, end, pgoff, vm_flags);
vmg->anon_vma = anon_vma;
}
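/*
 * Attempt to merge a new VMA over [start, end); if no merge is possible,
 * allocate and link a fresh VMA instead. *was_merged reports which happened.
 */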
static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
struct vma_merge_struct *vmg,
unsigned long start, unsigned long end,
pgoff_t pgoff, vm_flags_t vm_flags,
bool *was_merged)
{
struct vm_area_struct *merged;
vmg_set_range(vmg, start, end, pgoff, vm_flags);
merged = merge_new(vmg);
if (merged) {
*was_merged = true;
ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
return merged;
}
*was_merged = false;
ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);
return alloc_and_link_vma(mm, start, end, pgoff, vm_flags);
}
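/* Reset the was_cloned/was_unlinked markers on the shared dummy anon_vma between test cases. */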
static void reset_dummy_anon_vma(void)
{
dummy_anon_vma.was_cloned = false;
dummy_anon_vma.was_unlinked = false;
}
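/*
 * Detach and free every VMA in the mm, destroy its maple tree and reset test
 * state. Returns the number of VMAs removed so tests can assert on it.
 */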
static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
{
struct vm_area_struct *vma;
int count = 0;
fail_prealloc = false;
reset_dummy_anon_vma();
vma_iter_set(vmi, 0);
for_each_vma(*vmi, vma) {
detach_free_vma(vma);
count++;
}
mtree_destroy(&mm->mm_mt);
mm->map_count = 0;
return count;
}
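/*
 * Report whether a write was started on this VMA since the last check
 * (tracked via vm_lock_seq), resetting the marker for the next check.
 */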
static bool vma_write_started(struct vm_area_struct *vma)
{
int seq = vma->vm_lock_seq;
vma->vm_lock_seq = UINT_MAX;
return seq > -1;
}
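/* A vm_ops->close() callback that does nothing; tests install it to mark a VMA as having a close handler. */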
static void dummy_close(struct vm_area_struct *vma)
{
}
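/* Attach the given anon_vma to a VMA through a caller-supplied anon_vma_chain. */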
static void __vma_set_dummy_anon_vma(struct vm_area_struct *vma,
struct anon_vma_chain *avc,
struct anon_vma *anon_vma)
{
vma->anon_vma = anon_vma;
INIT_LIST_HEAD(&vma->anon_vma_chain);
list_add(&avc->same_vma, &vma->anon_vma_chain);
avc->anon_vma = vma->anon_vma;
}
static void vma_set_dummy_anon_vma(struct vm_area_struct *vma,
struct anon_vma_chain *avc)
{
__vma_set_dummy_anon_vma(vma, avc, &dummy_anon_vma);
}
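/*
 * Merge a new VMA at 0x1000-0x2000 with compatible neighbours on both sides,
 * expecting a single VMA spanning 0-0x3000.
 */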
static bool test_simple_merge(void)
{
struct vm_area_struct *vma;
vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vm_flags);
struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vm_flags);
VMA_ITERATOR(vmi, &mm, 0x1000);
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
.start = 0x1000,
.end = 0x2000,
.vm_flags = vm_flags,
.pgoff = 1,
};
ASSERT_FALSE(attach_vma(&mm, vma_left));
ASSERT_FALSE(attach_vma(&mm, vma_right));
vma = merge_new(&vmg);
ASSERT_NE(vma, NULL);
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0x3000);
ASSERT_EQ(vma->vm_pgoff, 0);
ASSERT_EQ(vma->vm_flags, vm_flags);
detach_free_vma(vma);
mtree_destroy(&mm.mm_mt);
return true;
}
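/*
 * Split a 0-0x3000 VMA by modifying flags over 0x1000-0x2000, then walk the
 * tree and check that the resulting three VMAs have the expected ranges and
 * page offsets.
 */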
static bool test_simple_modify(void)
{
struct vm_area_struct *vma;
vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags);
VMA_ITERATOR(vmi, &mm, 0x1000);
ASSERT_FALSE(attach_vma(&mm, init_vma));
vma = vma_modify_flags(&vmi, init_vma, init_vma,
0x1000, 0x2000, VM_READ | VM_MAYREAD);
ASSERT_NE(vma, NULL);
ASSERT_EQ(vma, init_vma);
ASSERT_EQ(vma->vm_start, 0x1000);
ASSERT_EQ(vma->vm_end, 0x2000);
ASSERT_EQ(vma->vm_pgoff, 1);
vma_iter_set(&vmi, 0);
vma = vma_iter_load(&vmi);
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0x1000);
ASSERT_EQ(vma->vm_pgoff, 0);
detach_free_vma(vma);
vma_iter_clear(&vmi);
vma = vma_next(&vmi);
ASSERT_EQ(vma->vm_start, 0x1000);
ASSERT_EQ(vma->vm_end, 0x2000);
ASSERT_EQ(vma->vm_pgoff, 1);
detach_free_vma(vma);
vma_iter_clear(&vmi);
vma = vma_next(&vmi);
ASSERT_EQ(vma->vm_start, 0x2000);
ASSERT_EQ(vma->vm_end, 0x3000);
ASSERT_EQ(vma->vm_pgoff, 2);
detach_free_vma(vma);
mtree_destroy(&mm.mm_mt);
return true;
}
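/* Expand a 0-0x1000 VMA in place so that it covers 0-0x3000. */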
static bool test_simple_expand(void)
{
vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, vm_flags);
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.vmi = &vmi,
.target = vma,
.start = 0,
.end = 0x3000,
.pgoff = 0,
};
ASSERT_FALSE(attach_vma(&mm, vma));
ASSERT_FALSE(expand_existing(&vmg));
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0x3000);
ASSERT_EQ(vma->vm_pgoff, 0);
detach_free_vma(vma);
mtree_destroy(&mm.mm_mt);
return true;
}
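/* Shrink a 0-0x3000 VMA down to 0-0x1000. */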
static bool test_simple_shrink(void)
{
vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags);
VMA_ITERATOR(vmi, &mm, 0);
ASSERT_FALSE(attach_vma(&mm, vma));
ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0x1000);
ASSERT_EQ(vma->vm_pgoff, 0);
detach_free_vma(vma);
mtree_destroy(&mm.mm_mt);
return true;
}
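/*
 * Exercise vma_merge_new_range() across a series of insertions between
 * existing VMAs, checking that compatible neighbours are progressively merged
 * until a single VMA spanning 0-0xc000 remains.
 */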
static bool test_merge_new(void)
{
vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
};
struct anon_vma_chain dummy_anon_vma_chain_a = {
.anon_vma = &dummy_anon_vma,
};
struct anon_vma_chain dummy_anon_vma_chain_b = {
.anon_vma = &dummy_anon_vma,
};
struct anon_vma_chain dummy_anon_vma_chain_c = {
.anon_vma = &dummy_anon_vma,
};
struct anon_vma_chain dummy_anon_vma_chain_d = {
.anon_vma = &dummy_anon_vma,
};
const struct vm_operations_struct vm_ops = {
.close = dummy_close,
};
int count;
struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
bool merged;
vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
ASSERT_NE(vma_a, NULL);
INIT_LIST_HEAD(&vma_a->anon_vma_chain);
list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
ASSERT_NE(vma_b, NULL);
INIT_LIST_HEAD(&vma_b->anon_vma_chain);
list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);
vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, vm_flags);
ASSERT_NE(vma_c, NULL);
INIT_LIST_HEAD(&vma_c->anon_vma_chain);
list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);
vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, vm_flags, &merged);
ASSERT_NE(vma_d, NULL);
INIT_LIST_HEAD(&vma_d->anon_vma_chain);
list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
ASSERT_FALSE(merged);
ASSERT_EQ(mm.map_count, 4);
vma_a->vm_ops = &vm_ops;
vma_b->anon_vma = &dummy_anon_vma;
vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, vm_flags, &merged);
ASSERT_EQ(vma, vma_a);
ASSERT_TRUE(merged);
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0x4000);
ASSERT_EQ(vma->vm_pgoff, 0);
ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 3);
vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, vm_flags, &merged);
ASSERT_EQ(vma, vma_a);
ASSERT_TRUE(merged);
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0x5000);
ASSERT_EQ(vma->vm_pgoff, 0);
ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 3);
vma_d->anon_vma = &dummy_anon_vma;
vma_d->vm_ops = &vm_ops;
vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, vm_flags, &merged);
ASSERT_EQ(vma, vma_d);
ASSERT_TRUE(merged);
ASSERT_EQ(vma->vm_start, 0x6000);
ASSERT_EQ(vma->vm_end, 0x9000);
ASSERT_EQ(vma->vm_pgoff, 6);
ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 3);
vma_d->vm_ops = NULL;
vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, vm_flags, &merged);
ASSERT_EQ(vma, vma_a);
ASSERT_TRUE(merged);
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0x9000);
ASSERT_EQ(vma->vm_pgoff, 0);
ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 2);
vma_c->anon_vma = &dummy_anon_vma;
vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, vm_flags, &merged);
ASSERT_EQ(vma, vma_c);
ASSERT_TRUE(merged);
ASSERT_EQ(vma->vm_start, 0xa000);
ASSERT_EQ(vma->vm_end, 0xc000);
ASSERT_EQ(vma->vm_pgoff, 0xa);
ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 2);
vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, vm_flags, &merged);
ASSERT_EQ(vma, vma_a);
ASSERT_TRUE(merged);
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0xc000);
ASSERT_EQ(vma->vm_pgoff, 0);
ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 1);
count = 0;
vma_iter_set(&vmi, 0);
for_each_vma(vmi, vma) {
ASSERT_NE(vma, NULL);
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0xc000);
ASSERT_EQ(vma->vm_pgoff, 0);
ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
detach_free_vma(vma);
count++;
}
ASSERT_EQ(count, 1);
mtree_destroy(&mm.mm_mt);
return true;
}
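/*
 * VM_SPECIAL flags (VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP) must prevent
 * merging, for both new-range and existing-range merges.
 */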
static bool test_vma_merge_special_flags(void)
{
vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
};
vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
vm_flags_t all_special_flags = 0;
int i;
struct vm_area_struct *vma_left, *vma;
for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
all_special_flags |= special_flags[i];
}
ASSERT_EQ(all_special_flags, VM_SPECIAL);
vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
ASSERT_NE(vma_left, NULL);
vmg_set_range(&vmg, 0x3000, 0x4000, 3, vm_flags);
for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
vm_flags_t special_flag = special_flags[i];
vma_left->__vm_flags = vm_flags | special_flag;
vmg.vm_flags = vm_flags | special_flag;
vma = merge_new(&vmg);
ASSERT_EQ(vma, NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
}
vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
ASSERT_NE(vma, NULL);
vmg.middle = vma;
for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
vm_flags_t special_flag = special_flags[i];
vma_left->__vm_flags = vm_flags | special_flag;
vmg.vm_flags = vm_flags | special_flag;
vma = merge_existing(&vmg);
ASSERT_EQ(vma, NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
}
cleanup_mm(&mm, &vmi);
return true;
}
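/*
 * A VMA with a vm_ops->close() handler must not be removed by a merge.
 * Exercise new-range and existing-range merges with the handler placed on the
 * middle or next VMA and check which merges are still permitted.
 */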
static bool test_vma_merge_with_close(void)
{
vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
};
const struct vm_operations_struct vm_ops = {
.close = dummy_close,
};
struct vm_area_struct *vma_prev, *vma_next, *vma;
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
vma_next->vm_ops = &vm_ops;
vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
ASSERT_EQ(merge_new(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma_prev->vm_start, 0);
ASSERT_EQ(vma_prev->vm_end, 0x5000);
ASSERT_EQ(vma_prev->vm_pgoff, 0);
ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
vma->vm_ops = &vm_ops;
vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
vma->vm_ops = &vm_ops;
vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
vma->vm_ops = &vm_ops;
vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
vma_next->vm_ops = &vm_ops;
vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma_prev->vm_start, 0);
ASSERT_EQ(vma_prev->vm_end, 0x5000);
ASSERT_EQ(vma_prev->vm_pgoff, 0);
ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
return true;
}
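/*
 * A new VMA filling the gap between two VMAs that both have a close handler
 * can still merge with the preceding VMA, since nothing is removed; the
 * following VMA is left alone.
 */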
static bool test_vma_merge_new_with_close(void)
{
vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
};
struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, vm_flags);
const struct vm_operations_struct vm_ops = {
.close = dummy_close,
};
struct vm_area_struct *vma;
vma_prev->vm_ops = &vm_ops;
vma_next->vm_ops = &vm_ops;
vmg_set_range(&vmg, 0x2000, 0x5000, 2, vm_flags);
vma = merge_new(&vmg);
ASSERT_NE(vma, NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0x5000);
ASSERT_EQ(vma->vm_pgoff, 0);
ASSERT_EQ(vma->vm_ops, &vm_ops);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 2);
cleanup_mm(&mm, &vmi);
return true;
}
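/*
 * Exercise vma_merge_existing_range(): partial and full merges of an existing
 * middle VMA with its prev and/or next, plus a set of ranges that must not
 * merge because they do not reach the edges of the existing VMA.
 */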
static bool test_merge_existing(void)
{
vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vm_area_struct *vma, *vma_prev, *vma_next;
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
};
const struct vm_operations_struct vm_ops = {
.close = dummy_close,
};
struct anon_vma_chain avc = {};
vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags);
vma->vm_ops = &vm_ops;
vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, vm_flags);
vma_next->vm_ops = &vm_ops;
vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma);
vmg.middle = vma;
vmg.prev = vma;
vma_set_dummy_anon_vma(vma, &avc);
ASSERT_EQ(merge_existing(&vmg), vma_next);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma_next->vm_start, 0x3000);
ASSERT_EQ(vma_next->vm_end, 0x9000);
ASSERT_EQ(vma_next->vm_pgoff, 3);
ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
ASSERT_EQ(vma->vm_start, 0x2000);
ASSERT_EQ(vma->vm_end, 0x3000);
ASSERT_EQ(vma->vm_pgoff, 2);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_TRUE(vma_write_started(vma_next));
ASSERT_EQ(mm.map_count, 2);
ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags);
vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, vm_flags);
vma_next->vm_ops = &vm_ops;
vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, vm_flags, &dummy_anon_vma);
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
ASSERT_EQ(merge_existing(&vmg), vma_next);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma_next->vm_start, 0x2000);
ASSERT_EQ(vma_next->vm_end, 0x9000);
ASSERT_EQ(vma_next->vm_pgoff, 2);
ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
ASSERT_TRUE(vma_write_started(vma_next));
ASSERT_EQ(mm.map_count, 1);
ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma_prev->vm_ops = &vm_ops;
vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
vma->vm_ops = &vm_ops;
vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma_prev->vm_start, 0);
ASSERT_EQ(vma_prev->vm_end, 0x6000);
ASSERT_EQ(vma_prev->vm_pgoff, 0);
ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
ASSERT_EQ(vma->vm_start, 0x6000);
ASSERT_EQ(vma->vm_end, 0x7000);
ASSERT_EQ(vma->vm_pgoff, 6);
ASSERT_TRUE(vma_write_started(vma_prev));
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 2);
ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma_prev->vm_ops = &vm_ops;
vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma_prev->vm_start, 0);
ASSERT_EQ(vma_prev->vm_end, 0x7000);
ASSERT_EQ(vma_prev->vm_pgoff, 0);
ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
ASSERT_TRUE(vma_write_started(vma_prev));
ASSERT_EQ(mm.map_count, 1);
ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma_prev->vm_ops = &vm_ops;
vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);
vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma_prev->vm_start, 0);
ASSERT_EQ(vma_prev->vm_end, 0x9000);
ASSERT_EQ(vma_prev->vm_pgoff, 0);
ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
ASSERT_TRUE(vma_write_started(vma_prev));
ASSERT_EQ(mm.map_count, 1);
ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags);
vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, vm_flags);
vmg_set_range(&vmg, 0x4000, 0x5000, 4, vm_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
vmg_set_range(&vmg, 0x6000, 0x7000, 6, vm_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
vmg_set_range(&vmg, 0x4000, 0x7000, 4, vm_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
vmg_set_range(&vmg, 0x4000, 0x6000, 4, vm_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
return true;
}
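/*
 * When prev and next carry different anon_vmas, a full three-way merge is
 * impossible; the range should still merge with prev alone, leaving next
 * untouched.
 */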
static bool test_anon_vma_non_mergeable(void)
{
vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vm_area_struct *vma, *vma_prev, *vma_next;
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
};
struct anon_vma_chain dummy_anon_vma_chain_1 = {};
struct anon_vma_chain dummy_anon_vma_chain_2 = {};
struct anon_vma dummy_anon_vma_2;
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);
vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma_prev->vm_start, 0);
ASSERT_EQ(vma_prev->vm_end, 0x7000);
ASSERT_EQ(vma_prev->vm_pgoff, 0);
ASSERT_TRUE(vma_write_started(vma_prev));
ASSERT_FALSE(vma_write_started(vma_next));
ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);
vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL);
vmg.prev = vma_prev;
vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);
vmg.anon_vma = NULL;
ASSERT_EQ(merge_new(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma_prev->vm_start, 0);
ASSERT_EQ(vma_prev->vm_end, 0x7000);
ASSERT_EQ(vma_prev->vm_pgoff, 0);
ASSERT_TRUE(vma_write_started(vma_prev));
ASSERT_FALSE(vma_write_started(vma_next));
ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
return true;
}
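/*
 * When a merge removes or shrinks a VMA that has an anon_vma, the surviving
 * VMA must inherit that anon_vma and the anon_vma must have been cloned.
 */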
static bool test_dup_anon_vma(void)
{
vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
};
struct anon_vma_chain dummy_anon_vma_chain = {
.anon_vma = &dummy_anon_vma,
};
struct vm_area_struct *vma_prev, *vma_next, *vma;
reset_dummy_anon_vma();
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
vma_next->anon_vma = &dummy_anon_vma;
vmg_set_range(&vmg, 0, 0x5000, 0, vm_flags);
vmg.target = vma_prev;
vmg.next = vma_next;
ASSERT_EQ(expand_existing(&vmg), 0);
ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
cleanup_mm(&mm, &vmi);
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
INIT_LIST_HEAD(&vma_next->anon_vma_chain);
list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);
vma_next->anon_vma = &dummy_anon_vma;
vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma_prev->vm_start, 0);
ASSERT_EQ(vma_prev->vm_end, 0x8000);
ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
cleanup_mm(&mm, &vmi);
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
vmg.anon_vma = &dummy_anon_vma;
vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma_prev->vm_start, 0);
ASSERT_EQ(vma_prev->vm_end, 0x8000);
ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
cleanup_mm(&mm, &vmi);
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags);
vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
vmg.prev = vma_prev;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma_prev->vm_start, 0);
ASSERT_EQ(vma_prev->vm_end, 0x5000);
ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
cleanup_mm(&mm, &vmi);
vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, vm_flags);
vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
vmg.prev = vma;
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), vma_next);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma_next->vm_start, 0x3000);
ASSERT_EQ(vma_next->vm_end, 0x8000);
ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
ASSERT_TRUE(vma_next->anon_vma->was_cloned);
cleanup_mm(&mm, &vmi);
return true;
}
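/*
 * If VMA iterator preallocation fails mid-merge, the operation must back out
 * cleanly: any cloned anon_vma is unlinked again and an error state reported.
 */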
static bool test_vmi_prealloc_fail(void)
{
vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.mm = &mm,
.vmi = &vmi,
};
struct anon_vma_chain avc = {};
struct vm_area_struct *vma_prev, *vma;
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
vma->anon_vma = &dummy_anon_vma;
vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, vm_flags, &dummy_anon_vma);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma, &avc);
fail_prealloc = true;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
ASSERT_TRUE(dummy_anon_vma.was_cloned);
ASSERT_TRUE(dummy_anon_vma.was_unlinked);
cleanup_mm(&mm, &vmi);
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
vma->anon_vma = &dummy_anon_vma;
vmg_set_range(&vmg, 0, 0x5000, 3, vm_flags);
vmg.target = vma_prev;
vmg.next = vma;
fail_prealloc = true;
ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
ASSERT_TRUE(dummy_anon_vma.was_cloned);
ASSERT_TRUE(dummy_anon_vma.was_unlinked);
cleanup_mm(&mm, &vmi);
return true;
}
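/*
 * vma_merge_extend() grows the VMA by the given delta and merges it with the
 * VMA that follows; here two VMAs collapse into one spanning 0-0x4000.
 */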
static bool test_merge_extend(void)
{
vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0x1000);
struct vm_area_struct *vma;
vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, vm_flags);
alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
ASSERT_EQ(vma->vm_start, 0);
ASSERT_EQ(vma->vm_end, 0x4000);
ASSERT_EQ(vma->vm_pgoff, 0);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(mm.map_count, 1);
cleanup_mm(&mm, &vmi);
return true;
}
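/*
 * copy_vma() relocates a VMA to a new address range, either allocating a new
 * VMA (first case) or merging into an existing adjacent VMA (second case,
 * where it returns vma_next).
 */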
static bool test_copy_vma(void)
{
vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
bool need_locks = false;
VMA_ITERATOR(vmi, &mm, 0);
struct vm_area_struct *vma, *vma_new, *vma_next;
vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
ASSERT_NE(vma_new, vma);
ASSERT_EQ(vma_new->vm_start, 0);
ASSERT_EQ(vma_new->vm_end, 0x2000);
ASSERT_EQ(vma_new->vm_pgoff, 0);
vma_assert_attached(vma_new);
cleanup_mm(&mm, &vmi);
vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, vm_flags);
vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);
vma_assert_attached(vma_new);
ASSERT_EQ(vma_new, vma_next);
cleanup_mm(&mm, &vmi);
return true;
}
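/*
 * With just_expand set, vma_merge_new_range() only expands the preceding VMA
 * over the new range and leaves the iterator where the caller positioned it.
 */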
static bool test_expand_only_mode(void)
{
vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
struct mm_struct mm = {};
VMA_ITERATOR(vmi, &mm, 0);
struct vm_area_struct *vma_prev, *vma;
VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, vm_flags, 5);
alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
vma_iter_set(&vmi, 0x3000);
vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
vmg.prev = vma_prev;
vmg.just_expand = true;
vma = vma_merge_new_range(&vmg);
ASSERT_NE(vma, NULL);
ASSERT_EQ(vma, vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
ASSERT_EQ(vma->vm_start, 0x3000);
ASSERT_EQ(vma->vm_end, 0x9000);
ASSERT_EQ(vma->vm_pgoff, 3);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
vma_assert_attached(vma);
cleanup_mm(&mm, &vmi);
return true;
}
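/*
 * Basic __mmap_region() checks: map four ranges and verify that adjacent
 * mappings are merged, leaving two VMAs covering the expected ranges.
 */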
static bool test_mmap_region_basic(void)
{
struct mm_struct mm = {};
unsigned long addr;
struct vm_area_struct *vma;
VMA_ITERATOR(vmi, &mm, 0);
current->mm = &mm;
addr = __mmap_region(NULL, 0x300000, 0x3000,
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
0x300, NULL);
ASSERT_EQ(addr, 0x300000);
addr = __mmap_region(NULL, 0x250000, 0x3000,
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
0x250, NULL);
ASSERT_EQ(addr, 0x250000);
addr = __mmap_region(NULL, 0x303000, 0x3000,
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
0x303, NULL);
ASSERT_EQ(addr, 0x303000);
addr = __mmap_region(NULL, 0x24d000, 0x3000,
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
0x24d, NULL);
ASSERT_EQ(addr, 0x24d000);
ASSERT_EQ(mm.map_count, 2);
for_each_vma(vmi, vma) {
if (vma->vm_start == 0x300000) {
ASSERT_EQ(vma->vm_end, 0x306000);
ASSERT_EQ(vma->vm_pgoff, 0x300);
} else if (vma->vm_start == 0x24d000) {
ASSERT_EQ(vma->vm_end, 0x253000);
ASSERT_EQ(vma->vm_pgoff, 0x24d);
} else {
ASSERT_FALSE(true);
}
}
cleanup_mm(&mm, &vmi);
return true;
}
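/* Run every test, print a summary, and exit non-zero if any test failed. */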
int main(void)
{
int num_tests = 0, num_fail = 0;
maple_tree_init();
vma_state_init();
#define TEST(name) \
do { \
num_tests++; \
if (!test_##name()) { \
num_fail++; \
fprintf(stderr, "Test " #name " FAILED\n"); \
} \
} while (0)
TEST(simple_merge);
TEST(simple_modify);
TEST(simple_expand);
TEST(simple_shrink);
TEST(merge_new);
TEST(vma_merge_special_flags);
TEST(vma_merge_with_close);
TEST(vma_merge_new_with_close);
TEST(merge_existing);
TEST(anon_vma_non_mergeable);
TEST(dup_anon_vma);
TEST(vmi_prealloc_fail);
TEST(merge_extend);
TEST(copy_vma);
TEST(expand_only_mode);
TEST(mmap_region_basic);
#undef TEST
printf("%d tests run, %d passed, %d failed.\n",
num_tests, num_tests - num_fail, num_fail);
return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}