#ifndef _VM_MAP_
#define _VM_MAP_
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/_mutex.h>
/* Type of the per-entry flags word; holds the MAP_ENTRY_* bits below. */
typedef u_int vm_eflags_t;
/*
 * Backing store for a map entry: either a VM object or, when the entry's
 * MAP_ENTRY_IS_SUB_MAP flag is set, a nested (sub) map.
 */
union vm_map_object {
struct vm_object *vm_object;	/* backing VM object */
struct vm_map *sub_map;		/* nested map; valid iff MAP_ENTRY_IS_SUB_MAP */
};
/*
 * An entry in an address map: the address range [start, end) together with
 * the object (or submap) backing it, its protection and inheritance
 * attributes, and linkage into the map's binary search tree (left/right,
 * rooted at vm_map.root; traversed in address order by vm_map_entry_succ()).
 *
 * NOTE(review): field glosses below are inferred from names and their use
 * in this header; confirm details against vm_map.c.
 */
struct vm_map_entry {
struct vm_map_entry *left;	/* left child in the entry BST */
struct vm_map_entry *right;	/* right child in the entry BST */
vm_offset_t start;	/* first address of the range */
vm_offset_t end;	/* first address past the range */
vm_offset_t next_read;	/* presumably sequential read-ahead cursor -- confirm */
vm_size_t max_free;	/* presumably largest free gap in this subtree -- confirm */
union vm_map_object object;	/* object or submap backing this range */
vm_ooffset_t offset;	/* offset into the object */
vm_eflags_t eflags;	/* MAP_ENTRY_* flags */
vm_prot_t protection;	/* current protection */
vm_prot_t max_protection;	/* maximum allowable protection */
vm_inherit_t inheritance;	/* inheritance behavior across fork */
uint8_t read_ahead;	/* read-ahead amount; see VM_FAULT_READ_AHEAD_* */
int wired_count;	/* wirings; see *_wired_count() helpers below */
struct ucred *cred;	/* credential charged for the mapping, if any -- confirm */
struct thread *wiring_thread;	/* thread wiring this entry -- confirm */
};
/*
 * vm_map_entry eflags bits.  NOTE(review): the one-line glosses are
 * reviewer inferences from the flag names and their use in this header;
 * verify against vm_map.c.
 */
#define MAP_ENTRY_NOSYNC 0x00000001	/* disable syncing for this mapping */
#define MAP_ENTRY_IS_SUB_MAP 0x00000002	/* object union holds a sub map */
#define MAP_ENTRY_COW 0x00000004	/* copy-on-write mapping */
#define MAP_ENTRY_NEEDS_COPY 0x00000008	/* object copy pending before write */
#define MAP_ENTRY_NOFAULT 0x00000010	/* faults on this entry not expected */
#define MAP_ENTRY_USER_WIRED 0x00000020	/* wired at user (mlock) request */
/* 2-bit access-pattern hint; extracted by vm_map_entry_behavior(). */
#define MAP_ENTRY_BEHAV_NORMAL 0x00000000	/* default behavior */
#define MAP_ENTRY_BEHAV_SEQUENTIAL 0x00000040	/* expect sequential access */
#define MAP_ENTRY_BEHAV_RANDOM 0x00000080	/* expect random access */
#define MAP_ENTRY_BEHAV_RESERVED 0x000000c0	/* unused encoding */
#define MAP_ENTRY_BEHAV_MASK 0x000000c0	/* mask for the hint field */
#define MAP_ENTRY_IN_TRANSITION 0x00000100	/* entry being modified; wait */
#define MAP_ENTRY_NEEDS_WAKEUP 0x00000200	/* waiters sleeping on entry */
#define MAP_ENTRY_NOCOREDUMP 0x00000400	/* exclude from core dumps */
#define MAP_ENTRY_VN_EXEC 0x00000800	/* executable vnode mapping -- confirm */
#define MAP_ENTRY_GROWS_DOWN 0x00001000	/* downward-growing (stack) entry */
#define MAP_ENTRY_UNUSED0 0x00002000	/* spare bit */
#define MAP_ENTRY_WIRE_SKIPPED 0x00004000	/* wiring skipped this entry */
#define MAP_ENTRY_WRITECNT 0x00008000	/* write count tracking -- confirm */
#define MAP_ENTRY_GUARD 0x00010000	/* guard entry; no backing object */
#define MAP_ENTRY_STACK_GAP 0x00020000	/* guard gap for a stack */
#define MAP_ENTRY_UNUSED1 0x00040000	/* spare bit */
#define MAP_ENTRY_HEADER 0x00080000	/* the map's header sentinel entry */
/*
 * 2-bit index describing the boundary on which this entry may be split;
 * 0 means no constraint.  Extracted by the accessor macro below.
 */
#define MAP_ENTRY_SPLIT_BOUNDARY_MASK 0x00300000
#define MAP_ENTRY_SPLIT_BOUNDARY_SHIFT 20
#define MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry) \
(((entry)->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >> \
MAP_ENTRY_SPLIT_BOUNDARY_SHIFT)
#ifdef _KERNEL
/*
 * vm_map_entry_behavior:
 *
 *	Extract the MAP_ENTRY_BEHAV_* access-pattern hint encoded in the
 *	entry's eflags word.
 */
static __inline u_char
vm_map_entry_behavior(vm_map_entry_t entry)
{
	u_char behavior;

	/* The mask (0xc0) fits in a u_char, so the narrowing is lossless. */
	behavior = entry->eflags & MAP_ENTRY_BEHAV_MASK;
	return (behavior);
}
/*
 * vm_map_entry_user_wired_count:
 *
 *	Return 1 if the entry is wired on behalf of a user request
 *	(MAP_ENTRY_USER_WIRED set), otherwise 0.
 */
static __inline int
vm_map_entry_user_wired_count(vm_map_entry_t entry)
{
	return ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0);
}
/*
 * vm_map_entry_system_wired_count:
 *
 *	Return the number of system (kernel-requested) wirings on the
 *	entry: the total wired count minus the user wiring, if any.
 */
static __inline int
vm_map_entry_system_wired_count(vm_map_entry_t entry)
{
	int user_wirings;

	user_wirings = vm_map_entry_user_wired_count(entry);
	return (entry->wired_count - user_wirings);
}
#endif
/*
 * A virtual address map.  The header entry is a sentinel: it terminates
 * the in-order walk performed by VM_MAP_ENTRY_FOREACH(), and its start/end
 * fields double as storage for the map's address bounds.  Note the
 * inversion: header.start holds the map's MAXIMUM address and header.end
 * its MINIMUM -- see vm_map_max()/vm_map_min() below.
 *
 * NOTE(review): field glosses are inferred from names and usage in this
 * header; confirm against vm_map.c.
 */
struct vm_map {
struct vm_map_entry header;	/* sentinel; also stores max/min bounds */
union {
struct sx lock;	/* sleepable lock (user maps) */
struct mtx system_mtx;	/* mutex; presumably used when MAP_SYSTEM_MAP -- confirm */
};
int nentries;	/* number of entries in the map */
vm_size_t size;	/* total virtual size mapped */
u_int timestamp;	/* presumably bumped on map modification -- confirm */
u_int flags;	/* MAP_* flags below; see vm_map_modflags() */
vm_map_entry_t root;	/* root of the entry binary search tree */
pmap_t pmap;	/* physical map backing this address space */
vm_offset_t anon_loc;	/* anonymous mapping placement hint -- confirm */
int busy;	/* busy count; see vm_map_busy()/vm_map_unbusy() */
#ifdef DIAGNOSTIC
int nupdates;	/* diagnostic update counter */
#endif
};
/*
 * vm_map flags, stored in vm_map.flags.  NOTE(review): glosses inferred
 * from names; verify against vm_map.c.
 */
#define MAP_WIREFUTURE 0x00000001	/* wire all future mappings */
#define MAP_BUSY_WAKEUP 0x00000002	/* waiters on the busy state */
#define MAP_IS_SUB_MAP 0x00000004	/* map is a submap of another map */
#define MAP_ASLR 0x00000008	/* randomize mapping placement */
#define MAP_ASLR_IGNSTART 0x00000010	/* ASLR ignores the region start -- confirm */
#define MAP_REPLENISH 0x00000020	/* entry replenishment pending -- confirm */
#define MAP_WXORX 0x00000040	/* enforce W^X on this map */
#define MAP_ASLR_STACK 0x00000080	/* randomize the stack location */
#define MAP_NEEDS_WAKEUP 0x40000000	/* waiters sleeping on the map */
#define MAP_SYSTEM_MAP 0x80000000	/* kernel map; see vm_map_is_system() */
#ifdef _KERNEL
/*
 * Kernel modules not tied to a specific kernel build route these accessors
 * through out-of-line _KBI functions so the layout of struct vm_map is not
 * compiled into the module binary.
 */
#if defined(KLD_MODULE) && !defined(KLD_TIED)
#define vm_map_max(map) vm_map_max_KBI((map))
#define vm_map_min(map) vm_map_min_KBI((map))
#define vm_map_pmap(map) vm_map_pmap_KBI((map))
#define vm_map_range_valid(map, start, end) \
vm_map_range_valid_KBI((map), (start), (end))
#else
static __inline vm_offset_t
vm_map_max(const struct vm_map *map)
{
return (map->header.start);
}
static __inline vm_offset_t
vm_map_min(const struct vm_map *map)
{
return (map->header.end);
}
/*
 * vm_map_pmap:
 *
 *	Return the physical map associated with the given address map.
 */
static __inline pmap_t
vm_map_pmap(vm_map_t map)
{
	pmap_t pm;

	pm = map->pmap;
	return (pm);
}
/*
 * vm_map_modflags:
 *
 *	Set the bits in 'set' and clear the bits in 'clear' in the map's
 *	flags word.  A bit present in both is cleared, matching the
 *	(flags | set) & ~clear evaluation order.
 */
static __inline void
vm_map_modflags(vm_map_t map, u_int set, u_int clear)
{
	u_int nflags;

	nflags = map->flags;
	nflags |= set;
	nflags &= ~clear;
	map->flags = nflags;
}
/*
 * vm_map_range_valid:
 *
 *	Return true when [start, end] is a well-ordered range lying
 *	entirely within the map's address bounds.
 */
static inline bool
vm_map_range_valid(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	return (start <= end && vm_map_min(map) <= start &&
	    end <= vm_map_max(map));
}
/*
 * vm_map_is_system:
 *
 *	Return true when the map is a system (kernel) map, as indicated by
 *	the MAP_SYSTEM_MAP flag.
 */
static inline bool
vm_map_is_system(vm_map_t map)
{
	bool sysmap;

	sysmap = (map->flags & MAP_SYSTEM_MAP) != 0;
	return (sysmap);
}
#endif
#endif
/*
 * A process virtual address space: an address map plus per-process segment
 * accounting and an embedded pmap.
 *
 * NOTE(review): field glosses are inferred from names; confirm against the
 * vmspace management code.
 */
struct vmspace {
struct vm_map vm_map;	/* the address map itself */
struct shmmap_state *vm_shm;	/* SysV shared memory state -- confirm */
segsz_t vm_swrss;	/* resident set size before last swap -- confirm */
segsz_t vm_tsize;	/* text segment size */
segsz_t vm_dsize;	/* data segment size */
segsz_t vm_ssize;	/* stack segment size */
caddr_t vm_taddr;	/* user text start address */
caddr_t vm_daddr;	/* user data start address */
caddr_t vm_maxsaddr;	/* address at maximum stack growth */
vm_offset_t vm_stacktop;	/* top of the user stack */
vm_offset_t vm_shp_base;	/* shared page base address -- confirm */
u_int vm_refcnt;	/* number of references */
struct pmap vm_pmap;	/* embedded pmap; see vmspace_pmap() */
};
#ifdef _KERNEL
/*
 * vmspace_pmap:
 *
 *	Return a pointer to the pmap embedded in the given vmspace.
 */
static __inline pmap_t
vmspace_pmap(struct vmspace *vmspace)
{
	return (&vmspace->vm_pmap);
}
#endif
#ifdef _KERNEL
/*
 * Map locking primitives (implemented in vm_map.c).  The underscore-
 * prefixed functions take file/line, supplied by the wrapper macros below,
 * for lock debugging and diagnostics.
 */
void _vm_map_lock(vm_map_t map, const char *file, int line);
void _vm_map_unlock(vm_map_t map, const char *file, int line);
int _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line);
void _vm_map_lock_read(vm_map_t map, const char *file, int line);
void _vm_map_unlock_read(vm_map_t map, const char *file, int line);
int _vm_map_trylock(vm_map_t map, const char *file, int line);
int _vm_map_trylock_read(vm_map_t map, const char *file, int line);
int _vm_map_lock_upgrade(vm_map_t map, const char *file, int line);
void _vm_map_lock_downgrade(vm_map_t map, const char *file, int line);
int vm_map_locked(vm_map_t map);
/* Busy-state management: block map modification without holding the lock. */
void vm_map_wakeup(vm_map_t map);
void vm_map_busy(vm_map_t map);
void vm_map_unbusy(vm_map_t map);
void vm_map_wait_busy(vm_map_t map);
/* Out-of-line accessors used by untied kernel modules (see macros above). */
vm_offset_t vm_map_max_KBI(const struct vm_map *map);
vm_offset_t vm_map_min_KBI(const struct vm_map *map);
pmap_t vm_map_pmap_KBI(vm_map_t map);
bool vm_map_range_valid_KBI(vm_map_t map, vm_offset_t start, vm_offset_t end);
/* Wrapper macros supplying LOCK_FILE/LOCK_LINE to the implementations. */
#define vm_map_lock(map) _vm_map_lock(map, LOCK_FILE, LOCK_LINE)
#define vm_map_unlock(map) _vm_map_unlock(map, LOCK_FILE, LOCK_LINE)
#define vm_map_unlock_and_wait(map, timo) \
_vm_map_unlock_and_wait(map, timo, LOCK_FILE, LOCK_LINE)
#define vm_map_lock_read(map) _vm_map_lock_read(map, LOCK_FILE, LOCK_LINE)
#define vm_map_unlock_read(map) _vm_map_unlock_read(map, LOCK_FILE, LOCK_LINE)
#define vm_map_trylock(map) _vm_map_trylock(map, LOCK_FILE, LOCK_LINE)
#define vm_map_trylock_read(map) \
_vm_map_trylock_read(map, LOCK_FILE, LOCK_LINE)
#define vm_map_lock_upgrade(map) \
_vm_map_lock_upgrade(map, LOCK_FILE, LOCK_LINE)
#define vm_map_lock_downgrade(map) \
_vm_map_lock_downgrade(map, LOCK_FILE, LOCK_LINE)
long vmspace_resident_count(struct vmspace *vmspace);
#endif
/*
 * Copy-on-write ("cow") option flags for vm_map_insert(), vm_map_find(),
 * and related functions.  NOTE(review): glosses inferred from names;
 * verify against vm_map.c.
 */
#define MAP_INHERIT_SHARE 0x00000001	/* share on fork */
#define MAP_COPY_ON_WRITE 0x00000002	/* mark entry COW */
#define MAP_NOFAULT 0x00000004	/* entry must not fault */
#define MAP_PREFAULT 0x00000008	/* prefault the mapping */
#define MAP_PREFAULT_PARTIAL 0x00000010	/* prefault only part of it */
#define MAP_DISABLE_SYNCER 0x00000020	/* disable syncing */
#define MAP_CHECK_EXCL 0x00000040	/* fail if range already mapped */
#define MAP_CREATE_GUARD 0x00000080	/* create a guard entry */
#define MAP_DISABLE_COREDUMP 0x00000100	/* exclude from core dumps */
#define MAP_PREFAULT_MADVISE 0x00000200	/* prefault per madvise -- confirm */
#define MAP_WRITECOUNT 0x00000400	/* track vnode write count -- confirm */
#define MAP_REMAP 0x00000800	/* replace an existing mapping */
#define MAP_STACK_AREA 0x00001000	/* stack-style area */
#define MAP_COW_UNUSED0 0x00002000	/* spare bit */
#define MAP_ACC_CHARGED 0x00004000	/* allocation already charged */
#define MAP_ACC_NO_CHARGE 0x00008000	/* do not charge the allocation */
#define MAP_COW_UNUSED1 0x00010000	/* spare bit */
#define MAP_CREATE_STACK_GAP 0x00020000	/* create a stack guard gap */
#define MAP_VN_EXEC 0x00040000	/* executable vnode mapping -- confirm */
/* 2-bit split-boundary index; mirrors MAP_ENTRY_SPLIT_BOUNDARY_* above. */
#define MAP_SPLIT_BOUNDARY_MASK 0x00180000
#define MAP_NO_HINT 0x00200000	/* ignore placement hint -- confirm */
#define MAP_SPLIT_BOUNDARY_SHIFT 19
/* vm_fault() fault_flags. */
#define VM_FAULT_NORMAL 0x00	/* ordinary fault */
#define VM_FAULT_WIRE 0x01	/* wire the mapped page */
#define VM_FAULT_DIRTY 0x02	/* mark the page dirty */
#define VM_FAULT_NOFILL 0x04	/* do not fill from backing store -- confirm */
/* Read-ahead tuning; MAX must fit in vm_map_entry.read_ahead (uint8_t). */
#define VM_FAULT_READ_AHEAD_MIN 7
#define VM_FAULT_READ_AHEAD_INIT 15
#define VM_FAULT_READ_AHEAD_MAX min(atop(maxphys) - 1, UINT8_MAX)
/* find_space argument to vm_map_find(): placement policy. */
#define VMFS_NO_SPACE 0	/* map at the given address exactly */
#define VMFS_ANY_SPACE 1	/* find any free range */
#define VMFS_OPTIMAL_SPACE 2	/* superpage-friendly placement -- confirm */
#define VMFS_SUPER_SPACE 3	/* superpage-aligned placement -- confirm */
#define VMFS_ALIGNED_SPACE(x) ((x) << 8)	/* alignment selector */
/* vm_map_wire()/vm_map_unwire() option flags. */
#define VM_MAP_WIRE_SYSTEM 0	/* wiring on behalf of the kernel */
#define VM_MAP_WIRE_USER 1	/* wiring at user request */
#define VM_MAP_WIRE_NOHOLES 0	/* fail if the range has holes */
#define VM_MAP_WIRE_HOLESOK 2	/* tolerate holes in the range */
#define VM_MAP_WIRE_WRITE 4	/* require write access -- confirm */
/*
 * Callback used by vm_map_entry_read_succ() to copy the entry at 'addr' in
 * a (possibly foreign) address space into 'dest'; returns nonzero on
 * success, zero on failure.
 */
typedef int vm_map_entry_reader(void *token, vm_map_entry_t addr,
vm_map_entry_t dest);
#ifndef _KERNEL
/*
 * vm_map_entry_read_succ:
 *
 * Userspace counterpart of vm_map_entry_succ(): find the entry that
 * follows *clone in address order, fetching entries from the target
 * address space through the 'reader' callback (nonzero return = success).
 * On success the successor has been read into *clone and its (foreign)
 * address is returned; NULL is returned if any read fails.
 */
static inline vm_map_entry_t
vm_map_entry_read_succ(void *token, struct vm_map_entry *const clone,
vm_map_entry_reader reader)
{
vm_map_entry_t after, backup;
vm_offset_t start;
/* Tentative successor: the current entry's right link. */
after = clone->right;
start = clone->start;
if (!reader(token, after, clone))
return (NULL);
/* *clone now holds *after; remember its left link. */
backup = clone->left;
if (!reader(token, backup, clone))
return (NULL);
/*
 * A left child starting above the original entry means the true
 * successor lies down the left spine; walk left until we are back at
 * an entry with the original start (mirrors vm_map_entry_succ()).
 */
if (clone->start > start) {
do {
after = backup;
backup = clone->left;
if (!reader(token, backup, clone))
return (NULL);
} while (clone->start != start);
}
/* Re-read the successor so *clone holds it on return. */
if (!reader(token, after, clone))
return (NULL);
return (after);
}
#endif
#ifdef _KERNEL
/*
 * Primary vm_map operations, implemented in vm_map.c.  Unless noted in
 * vm_map.c, int-returning functions use KERN_* result codes.
 */
boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t);
int vm_map_delete(vm_map_t, vm_offset_t, vm_offset_t);
/* Mapping creation: find free space and/or insert at a given address. */
int vm_map_find(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t,
vm_offset_t, int, vm_prot_t, vm_prot_t, int);
int vm_map_find_locked(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *,
vm_size_t, vm_offset_t, int, vm_prot_t, vm_prot_t, int);
int vm_map_find_min(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *,
vm_size_t, vm_offset_t, vm_offset_t, int, vm_prot_t, vm_prot_t, int);
int vm_map_find_aligned(vm_map_t map, vm_offset_t *addr, vm_size_t length,
vm_offset_t max_addr, vm_offset_t alignment);
int vm_map_fixed(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_size_t,
vm_prot_t, vm_prot_t, int);
vm_offset_t vm_map_findspace(vm_map_t, vm_offset_t, vm_size_t);
int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
/* Map initialization; the _system variant sets up a system (kernel) map. */
void vm_map_init(vm_map_t, pmap_t, vm_offset_t, vm_offset_t);
void vm_map_init_system(vm_map_t, pmap_t, vm_offset_t, vm_offset_t);
int vm_map_insert (vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int);
/* Address lookup for fault handling; paired with vm_map_lookup_done(). */
int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
vm_pindex_t *, vm_prot_t *, boolean_t *);
int vm_map_lookup_locked(vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
vm_pindex_t *, vm_prot_t *, boolean_t *);
void vm_map_lookup_done (vm_map_t, vm_map_entry_t);
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
/*
 * vm_map_entry_first:
 *
 *	Return the lowest-addressed entry in the map (the header
 *	sentinel's right link).  Iteration via VM_MAP_ENTRY_FOREACH()
 *	terminates when the header itself is reached.
 */
static inline vm_map_entry_t
vm_map_entry_first(vm_map_t map)
{
	vm_map_entry_t first;

	first = map->header.right;
	return (first);
}
/*
 * vm_map_entry_succ:
 *
 * Return the entry that follows 'entry' in address order.  The entry
 * links are maintained so the successor is either the right link itself
 * or is found by descending the right link's left spine back down to
 * 'entry' -- no parent pointers are needed.
 */
static inline vm_map_entry_t
vm_map_entry_succ(vm_map_entry_t entry)
{
vm_map_entry_t after;
after = entry->right;
/*
 * A left child starting above 'entry' means the successor is on the
 * left spine; descend until the link back to 'entry' is found.
 */
if (after->left->start > entry->start) {
do
after = after->left;
while (after->left != entry);
}
return (after);
}
/*
 * Iterate over the map's entries in address order; terminates when the
 * header sentinel is reached.  NOTE(review): callers presumably must hold
 * the map lock -- confirm.
 */
#define VM_MAP_ENTRY_FOREACH(it, map) \
for ((it) = vm_map_entry_first(map); \
(it) != &(map)->header; \
(it) = vm_map_entry_succ(it))
/* Flag bits for vm_map_protect(). */
#define VM_MAP_PROTECT_SET_PROT 0x0001	/* apply new_prot */
#define VM_MAP_PROTECT_SET_MAXPROT 0x0002	/* apply new_maxprot */
#define VM_MAP_PROTECT_GROWSDOWN 0x0004	/* cover stack growth -- confirm */
int vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
vm_prot_t new_prot, vm_prot_t new_maxprot, int flags);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
vm_map_entry_t vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev,
vm_map_entry_t entry);
void vm_map_startup (void);
int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
int vm_map_sync(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int);
int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int);
/* Wiring operations; flags are the VM_MAP_WIRE_* bits above. */
int vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
int flags);
int vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags);
int vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end,
int flags);
long vmspace_swap_count(struct vmspace *vmspace);
void vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add);
#endif
#endif