#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H
#include <linux/fs.h>
#include <linux/khugepaged.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>
#include <linux/tracepoint-defs.h>
#include "vma.h"
struct folio_batch;
struct pagetable_move_control {
	struct vm_area_struct *old; /* Source VMA. */
	struct vm_area_struct *new; /* Destination VMA. */
	unsigned long old_addr; /* Address from which the move begins. */
	unsigned long old_end; /* Exclusive address at which old range ends. */
	unsigned long new_addr; /* Address to move page tables to. */
	unsigned long len_in; /* Bytes to remap specified by user. */

	bool need_rmap_locks; /* Do rmap locks need to be taken? */
	bool for_stack; /* Is this an early temp stack being moved? */
};
#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_) \
struct pagetable_move_control name = { \
.old = old_, \
.new = new_, \
.old_addr = old_addr_, \
.old_end = (old_addr_) + (len_), \
.new_addr = new_addr_, \
.len_in = len_, \
}
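/*
 * A minimal usage sketch (identifier names hypothetical): declare an
 * on-stack descriptor and hand it to move_page_tables(), declared later
 * in this header:
 *
 *	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_addr, new_addr, len);
 *	moved = move_page_tables(&pmc);
 */
/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */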
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
__GFP_NOLOCKDEP)
/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
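/*
 * Different from WARN_ON_ONCE(), no warning will be issued
 * when we specify __GFP_NOWARN.
 */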
#define WARN_ON_ONCE_GFP(cond, gfp) ({ \
static bool __section(".data..once") __warned; \
int __ret_warn_once = !!(cond); \
\
if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
__warned = true; \
WARN_ON(1); \
} \
unlikely(__ret_warn_once); \
})
void page_writeback_init(void);
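/*
 * How many individual pages have an elevated _mapcount.  Excludes
 * the folio's entire_mapcount.
 *
 * Don't use directly, call folio_nr_pages_mapped() instead.
 */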
#define ENTIRELY_MAPPED 0x800000
#define FOLIO_PAGES_MAPPED (ENTIRELY_MAPPED - 1)
#define SHOW_MEM_FILTER_NODES (0x0001u)
static inline int folio_nr_pages_mapped(const struct folio *folio)
{
if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
return -1;
return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}
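/*
 * Retrieve the first entry of a folio based on a provided entry within the
 * folio. We cannot rely on folio->swap as there is no guarantee that it has
 * been initialized.
 */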
static inline swp_entry_t folio_swap(swp_entry_t entry,
const struct folio *folio)
{
swp_entry_t swap = {
.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
};
return swap;
}
static inline void *folio_raw_mapping(const struct folio *folio)
{
unsigned long mapping = (unsigned long)folio->mapping;
return (void *)(mapping & ~FOLIO_MAPPING_FLAGS);
}
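/*
 * This is a file-backed mapping, and is about to be memory mapped - invoke
 * its mmap hook. On error the mapping may be in an inconsistent state, so
 * reset vm_ops to the dummy ops to ensure no further hooks run on it.
 */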
static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
{
int err = vfs_mmap(file, vma);
if (likely(!err))
return 0;
vma->vm_ops = &vma_dummy_vm_ops;
return err;
}
static inline void vma_close(struct vm_area_struct *vma)
{
if (vma->vm_ops && vma->vm_ops->close) {
vma->vm_ops->close(vma);
vma->vm_ops = &vma_dummy_vm_ops;
}
}
#ifdef CONFIG_MMU
typedef int __bitwise fpb_t;
/* Compare PTEs respecting the dirty bit. */
#define FPB_RESPECT_DIRTY ((__force fpb_t)BIT(0))
/* Compare PTEs respecting the soft-dirty bit. */
#define FPB_RESPECT_SOFT_DIRTY ((__force fpb_t)BIT(1))
/* Compare PTEs respecting the writable bit. */
#define FPB_RESPECT_WRITE ((__force fpb_t)BIT(2))
/*
 * Merge PTE write bits: if any PTE in the batch is writable, modify the
 * PTE at @ptentp to be writable.
 */
#define FPB_MERGE_WRITE ((__force fpb_t)BIT(3))
/*
 * Merge PTE young and dirty bits: if any PTE in the batch is young or dirty,
 * modify the PTE at @ptentp to be young or dirty, respectively.
 */
#define FPB_MERGE_YOUNG_DIRTY ((__force fpb_t)BIT(4))
static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
{
if (!(flags & FPB_RESPECT_DIRTY))
pte = pte_mkclean(pte);
if (likely(!(flags & FPB_RESPECT_SOFT_DIRTY)))
pte = pte_clear_soft_dirty(pte);
if (likely(!(flags & FPB_RESPECT_WRITE)))
pte = pte_wrprotect(pte);
return pte_mkold(pte);
}
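/**
 * folio_pte_batch_flags - detect a PTE batch for a large folio
 * @folio: The large folio to detect a PTE batch for.
 * @vma: The VMA. Only relevant with FPB_MERGE_WRITE, otherwise can be NULL.
 * @ptep: Page table pointer for the first entry.
 * @ptentp: Pointer to a COPY of the first page table entry whose flags this
 *	    function updates based on @flags if appropriate.
 * @max_nr: The maximum number of table entries to consider.
 * @flags: Flags to modify the PTE batch semantics.
 *
 * Detect a PTE batch: consecutive (present) PTEs that map consecutive pages
 * of the same large folio in a single VMA and a single page table.
 *
 * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
 * the accessed bit, writable bit, dirty bit (unless FPB_RESPECT_DIRTY is set)
 * and soft-dirty bit (unless FPB_RESPECT_SOFT_DIRTY is set).
 *
 * @ptep must map any page of the folio. max_nr must be at least one and
 * must be limited by the caller so scanning cannot cross a single page table.
 *
 * Return: the number of table entries in the batch.
 */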
static inline unsigned int folio_pte_batch_flags(struct folio *folio,
struct vm_area_struct *vma, pte_t *ptep, pte_t *ptentp,
unsigned int max_nr, fpb_t flags)
{
bool any_writable = false, any_young = false, any_dirty = false;
pte_t expected_pte, pte = *ptentp;
unsigned int nr, cur_nr;
VM_WARN_ON_FOLIO(!pte_present(pte), folio);
VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
VM_WARN_ON(virt_addr_valid(ptentp) && PageTable(virt_to_page(ptentp)));
max_nr = min_t(unsigned long, max_nr,
folio_pfn(folio) + folio_nr_pages(folio) - pte_pfn(pte));
nr = pte_batch_hint(ptep, pte);
expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
ptep = ptep + nr;
while (nr < max_nr) {
pte = ptep_get(ptep);
if (!pte_same(__pte_batch_clear_ignored(pte, flags), expected_pte))
break;
if (flags & FPB_MERGE_WRITE)
any_writable |= pte_write(pte);
if (flags & FPB_MERGE_YOUNG_DIRTY) {
any_young |= pte_young(pte);
any_dirty |= pte_dirty(pte);
}
cur_nr = pte_batch_hint(ptep, pte);
expected_pte = pte_advance_pfn(expected_pte, cur_nr);
ptep += cur_nr;
nr += cur_nr;
}
if (any_writable)
*ptentp = pte_mkwrite(*ptentp, vma);
if (any_young)
*ptentp = pte_mkyoung(*ptentp);
if (any_dirty)
*ptentp = pte_mkdirty(*ptentp);
return min(nr, max_nr);
}
unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
unsigned int max_nr);
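/**
 * pte_move_swp_offset - Move the swap entry offset field of a swap pte
 *	 forward or backward by delta
 * @pte: The initial pte state; is_swap_pte(pte) must be true and
 *	 non_swap_entry() must be false.
 * @delta: The direction and the offset we are moving; forward if delta
 *	 is positive; backward if delta is negative
 *
 * Moves the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */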
static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
{
swp_entry_t entry = pte_to_swp_entry(pte);
pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
(swp_offset(entry) + delta)));
if (pte_swp_soft_dirty(pte))
new = pte_swp_mksoft_dirty(new);
if (pte_swp_exclusive(pte))
new = pte_swp_mkexclusive(new);
if (pte_swp_uffd_wp(pte))
new = pte_swp_mkuffd_wp(new);
return new;
}
static inline pte_t pte_next_swp_offset(pte_t pte)
{
return pte_move_swp_offset(pte, 1);
}
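/**
 * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
 * @start_ptep: Page table pointer for the first entry.
 * @max_nr: The maximum number of table entries to consider.
 * @pte: Page table entry for the first entry.
 *
 * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
 * containing swap entries all with consecutive offsets and targeting the same
 * swap type, all with matching swp pte bits.
 *
 * max_nr must be at least one and must be limited by the caller so scanning
 * cannot cross a single page table.
 *
 * Return: the number of table entries in the batch.
 */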
static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
{
pte_t expected_pte = pte_next_swp_offset(pte);
const pte_t *end_ptep = start_ptep + max_nr;
swp_entry_t entry = pte_to_swp_entry(pte);
pte_t *ptep = start_ptep + 1;
unsigned short cgroup_id;
VM_WARN_ON(max_nr < 1);
VM_WARN_ON(!is_swap_pte(pte));
VM_WARN_ON(non_swap_entry(entry));
cgroup_id = lookup_swap_cgroup_id(entry);
while (ptep < end_ptep) {
pte = ptep_get(ptep);
if (!pte_same(pte, expected_pte))
break;
if (lookup_swap_cgroup_id(pte_to_swp_entry(pte)) != cgroup_id)
break;
expected_pte = pte_next_swp_offset(expected_pte);
ptep++;
}
return ptep - start_ptep;
}
#endif /* CONFIG_MMU */
void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
pg_data_t *pgdat = folio_pgdat(folio);
int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
if (nr_throttled)
__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}
static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
wait_queue_head_t *wqh;
wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
if (waitqueue_active(wqh))
wake_up(wqh);
}
vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
{
vm_fault_t ret = __vmf_anon_prepare(vmf);
if (unlikely(ret & VM_FAULT_RETRY))
vma_end_read(vmf->vma);
return ret;
}
vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);
void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
struct vm_area_struct *start_vma, unsigned long floor,
unsigned long ceiling, bool mm_wr_locked);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
struct zap_details *details);
void zap_page_range_single_batched(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long addr,
unsigned long size, struct zap_details *details);
int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
gfp_t gfp);
void page_cache_ra_order(struct readahead_control *, struct file_ra_state *);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
struct file *file, pgoff_t index, unsigned long nr_to_read)
{
DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
force_page_cache_ra(&ractl, nr_to_read);
}
unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
loff_t end);
long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
unsigned long mapping_try_invalidate(struct address_space *mapping,
pgoff_t start, pgoff_t end, unsigned long *nr_failed);
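/*
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., whether it can be placed on
 * one of the active or inactive LRU lists.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. folio is part of an mlocked VMA
 */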
static inline bool folio_evictable(struct folio *folio)
{
bool ret;
rcu_read_lock();
ret = !mapping_unevictable(folio_mapping(folio)) &&
!folio_test_mlocked(folio);
rcu_read_unlock();
return ret;
}
static inline void set_page_refcounted(struct page *page)
{
VM_BUG_ON_PAGE(PageTail(page), page);
VM_BUG_ON_PAGE(page_ref_count(page), page);
set_page_count(page, 1);
}
static inline bool folio_needs_release(struct folio *folio)
{
struct address_space *mapping = folio_mapping(folio);
return folio_has_private(folio) ||
(mapping && mapping_release_always(mapping));
}
extern unsigned long highest_memmap_pfn;
/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16
bool folio_isolate_lru(struct folio *folio);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
#ifdef CONFIG_NUMA
int user_proactive_reclaim(char *buf,
struct mem_cgroup *memcg, pg_data_t *pgdat);
#else
static inline int user_proactive_reclaim(char *buf,
struct mem_cgroup *memcg, pg_data_t *pgdat)
{
return 0;
}
#endif
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
/* Convert a number of pages to a size in KiB. */
#define K(x) ((x) << (PAGE_SHIFT-10))
extern char * const zone_names[MAX_NR_ZONES];
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
extern int min_free_kbytes;
extern int defrag_mode;
void setup_per_zone_wmarks(void);
void calculate_min_free_kbytes(void);
int __meminit init_per_zone_wmark_min(void);
void page_alloc_sysctl_init(void);
struct alloc_context {
struct zonelist *zonelist;
nodemask_t *nodemask;
struct zoneref *preferred_zoneref;
int migratetype;
enum zone_type highest_zoneidx;
bool spread_dirty_pages;
};
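/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */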
static inline unsigned int buddy_order(struct page *page)
{
return page_private(page);
}
#define buddy_order_unsafe(page) READ_ONCE(page_private(page))
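/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */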
static inline bool page_is_buddy(struct page *page, struct page *buddy,
unsigned int order)
{
if (!page_is_guard(buddy) && !PageBuddy(buddy))
return false;
if (buddy_order(buddy) != order)
return false;
if (page_zone_id(page) != page_zone_id(buddy))
return false;
VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
return true;
}
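/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 */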
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
return page_pfn ^ (1 << order);
}
static inline struct page *find_buddy_page_pfn(struct page *page,
unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
struct page *buddy;
buddy = page + (__buddy_pfn - pfn);
if (buddy_pfn)
*buddy_pfn = __buddy_pfn;
if (page_is_buddy(page, buddy, order))
return buddy;
return NULL;
}
extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
unsigned long end_pfn, struct zone *zone);
static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
unsigned long end_pfn, struct zone *zone)
{
if (zone->contiguous)
return pfn_to_page(start_pfn);
return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}
void set_zone_contiguous(struct zone *zone);
bool pfn_range_intersects_zones(int nid, unsigned long start_pfn,
unsigned long nr_pages);
static inline void clear_zone_contiguous(struct zone *zone)
{
zone->contiguous = false;
}
extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order,
enum meminit_context context);
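/*
 * This will have no effect, other than possibly generating a warning, if the
 * caller passes in a non-large folio.
 */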
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
return;
folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
#ifdef NR_PAGES_IN_LARGE_FOLIO
folio->_nr_pages = 1U << order;
#endif
}
bool __folio_unqueue_deferred_split(struct folio *folio);
static inline bool folio_unqueue_deferred_split(struct folio *folio)
{
if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
return false;
if (data_race(list_empty(&folio->_deferred_list)))
return false;
return __folio_unqueue_deferred_split(folio);
}
static inline struct folio *page_rmappable_folio(struct page *page)
{
struct folio *folio = (struct folio *)page;
if (folio && folio_test_large(folio))
folio_set_large_rmappable(folio);
return folio;
}
static inline void prep_compound_head(struct page *page, unsigned int order)
{
struct folio *folio = (struct folio *)page;
folio_set_order(folio, order);
atomic_set(&folio->_large_mapcount, -1);
if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
atomic_set(&folio->_nr_pages_mapped, 0);
if (IS_ENABLED(CONFIG_MM_ID)) {
folio->_mm_ids = 0;
folio->_mm_id_mapcount[0] = -1;
folio->_mm_id_mapcount[1] = -1;
}
if (IS_ENABLED(CONFIG_64BIT) || order > 1) {
atomic_set(&folio->_pincount, 0);
atomic_set(&folio->_entire_mapcount, -1);
}
if (order > 1)
INIT_LIST_HEAD(&folio->_deferred_list);
}
static inline void prep_compound_tail(struct page *head, int tail_idx)
{
struct page *p = head + tail_idx;
p->mapping = TAIL_MAPPING;
set_compound_head(p, head);
set_page_private(p, 0);
}
void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order);
extern int user_min_free_kbytes;
struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid,
nodemask_t *);
#define __alloc_frozen_pages(...) \
alloc_hooks(__alloc_frozen_pages_noprof(__VA_ARGS__))
void free_frozen_pages(struct page *page, unsigned int order);
void free_unref_folios(struct folio_batch *fbatch);
#ifdef CONFIG_NUMA
struct page *alloc_frozen_pages_noprof(gfp_t, unsigned int order);
#else
static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order)
{
return __alloc_frozen_pages_noprof(gfp, order, numa_node_id(), NULL);
}
#endif
#define alloc_frozen_pages(...) \
alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__))
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
extern void zone_pcp_init(struct zone *zone);
extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr,
int nid, bool exact_nid);
void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
unsigned long, enum meminit_context, struct vmem_altmap *, int,
bool);
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
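/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */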
struct compact_control {
	struct list_head freepages[NR_PAGE_ORDERS]; /* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should be scanned */
	bool contended;			/* Signal lock contention */
	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
					 * when there are potentially transient
					 * isolation or migration failures to
					 * ensure forward progress.
					 */
	bool alloc_contig;		/* alloc_contig_range allocation */
};
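/*
 * Used in direct compaction when a suitably sized free page should be
 * captured immediately as it is isolated, instead of being returned to
 * the allocator via the normal freelist path.
 */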
struct capture_control {
struct compact_control *cc;
struct page *page;
};
unsigned long
isolate_freepages_range(struct compact_control *cc,
unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
unsigned long low_pfn, unsigned long end_pfn);
void init_cma_reserved_pageblock(struct page *page);
#endif
struct cma;
#ifdef CONFIG_CMA
void *cma_reserve_early(struct cma *cma, unsigned long size);
void init_cma_pageblock(struct page *page);
#else
static inline void *cma_reserve_early(struct cma *cma, unsigned long size)
{
return NULL;
}
static inline void init_cma_pageblock(struct page *page)
{
}
#endif
int find_suitable_fallback(struct free_area *area, unsigned int order,
int migratetype, bool claimable);
static inline bool free_area_empty(struct free_area *area, int migratetype)
{
return list_empty(&area->free_list[migratetype]);
}
struct anon_vma *folio_anon_vma(const struct folio *folio);
#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end, int *locked);
extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
unsigned long end, bool write, int *locked);
extern bool mlock_future_ok(struct mm_struct *mm, vm_flags_t vm_flags,
unsigned long bytes);
static inline bool
folio_within_range(struct folio *folio, struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
pgoff_t pgoff, addr;
unsigned long vma_pglen = vma_pages(vma);
VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
if (start > end)
return false;
if (start < vma->vm_start)
start = vma->vm_start;
if (end > vma->vm_end)
end = vma->vm_end;
pgoff = folio_pgoff(folio);
if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
return false;
addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
return !(addr < start || end - addr < folio_size(folio));
}
static inline bool
folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
{
return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
}
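/*
 * mlock_vma_folio() and munlock_vma_folio():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
 * the end of folio_remove_rmap_*().
 */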
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
struct vm_area_struct *vma)
{
if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
mlock_folio(folio);
}
void munlock_folio(struct folio *folio);
static inline void munlock_vma_folio(struct folio *folio,
struct vm_area_struct *vma)
{
if (unlikely(vma->vm_flags & VM_LOCKED))
munlock_folio(folio);
}
void mlock_new_folio(struct folio *folio);
bool need_mlock_drain(int cpu);
void mlock_drain_local(void);
void mlock_drain_remote(int cpu);
extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
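/*
 * Return the start of user virtual address at the specific offset within
 * a vma.
 */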
static inline unsigned long vma_address(const struct vm_area_struct *vma,
pgoff_t pgoff, unsigned long nr_pages)
{
unsigned long address;
if (pgoff >= vma->vm_pgoff) {
address = vma->vm_start +
((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
if (address < vma->vm_start || address >= vma->vm_end)
address = -EFAULT;
} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
address = vma->vm_start;
} else {
address = -EFAULT;
}
return address;
}
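/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */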
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
struct vm_area_struct *vma = pvmw->vma;
pgoff_t pgoff;
unsigned long address;
if (pvmw->nr_pages == 1)
return pvmw->address + PAGE_SIZE;
pgoff = pvmw->pgoff + pvmw->nr_pages;
address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
if (address < vma->vm_start || address > vma->vm_end)
address = vma->vm_end;
return address;
}
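/*
 * If this is the first attempt at the fault and retrying is permitted (and
 * not forbidden by FAULT_FLAG_RETRY_NOWAIT), pin the fault's file and drop
 * the fault lock so that IO may be performed without holding it. Returns
 * the pinned file, or @fpin unchanged if already pinned.
 */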
static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
struct file *fpin)
{
int flags = vmf->flags;
if (fpin)
return fpin;
if (fault_flag_allow_retry_first(flags) &&
!(flags & FAULT_FLAG_RETRY_NOWAIT)) {
fpin = get_file(vmf->vma->vm_file);
release_fault_lock(vmf);
}
return fpin;
}
#else
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_new_folio(struct folio *folio) { }
static inline bool need_mlock_drain(int cpu) { return false; }
static inline void mlock_drain_local(void) { }
static inline void mlock_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DECLARE_STATIC_KEY_TRUE(deferred_pages);
bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
#endif
void init_deferred_page(unsigned long pfn, int nid);
enum mminit_level {
MMINIT_WARNING,
MMINIT_VERIFY,
MMINIT_TRACE
};
#ifdef CONFIG_DEBUG_MEMORY_INIT
extern int mminit_loglevel;
#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
if (level < mminit_loglevel) { \
if (level <= MMINIT_WARNING) \
pr_warn("mminit::" prefix " " fmt, ##arg); \
else \
printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
} \
} while (0)
extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else
static inline void mminit_dprintk(enum mminit_level level,
const char *prefix, const char *fmt, ...)
{
}
static inline void mminit_verify_pageflags_layout(void)
{
}
static inline void mminit_verify_zonelist(void)
{
}
#endif
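/* Possible outcomes of node_reclaim(). */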
#define NODE_RECLAIM_NOSCAN -2
#define NODE_RECLAIM_FULL -1
#define NODE_RECLAIM_SOME 0
#define NODE_RECLAIM_SUCCESS 1
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
#define node_reclaim_mode 0
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
unsigned int order)
{
return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
return NUMA_NO_NODE;
}
#endif
static inline bool node_reclaim_enabled(void)
{
return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}
#ifdef CONFIG_MEMORY_FAILURE
int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill);
void shake_folio(struct folio *folio);
extern int hwpoison_filter(struct page *p);
extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;
#define MAGIC_HWPOISON 0x48575053U /* HWPS */
void SetPageHWPoisonTakenOff(struct page *page);
void ClearPageHWPoisonTakenOff(struct page *page);
bool take_page_off_buddy(struct page *page);
bool put_page_back_buddy(struct page *page);
struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
struct vm_area_struct *vma, struct list_head *to_kill,
unsigned long ksm_addr);
unsigned long page_mapped_in_vma(const struct page *page,
struct vm_area_struct *vma);
#else
static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
{
return -EBUSY;
}
#endif
extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
extern void set_pageblock_order(void);
unsigned long reclaim_pages(struct list_head *folio_list);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
struct list_head *folio_list);
#define ALLOC_WMARK_MIN WMARK_MIN
#define ALLOC_WMARK_LOW WMARK_LOW
#define ALLOC_WMARK_HIGH WMARK_HIGH
#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */
/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper - so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU.
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM 0x08
#else
#define ALLOC_OOM ALLOC_NO_WATERMARKS
#endif
/*
 * Caller cannot block. Allow access to 25% of the min watermark, or 62.5%
 * if __GFP_HIGH is set.
 */
#define ALLOC_NON_BLOCK 0x10
/* __GFP_HIGH set. Allow access to 50% of the min watermark. */
#define ALLOC_MIN_RESERVE 0x20
#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
#define ALLOC_CMA 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT 0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT 0x0
#endif
#define ALLOC_HIGHATOMIC 0x200 /* allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_TRYLOCK 0x400 /* only use spin_trylock in the allocation path */
#define ALLOC_KSWAPD 0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
enum ttu_flags;
struct tlbflush_unmap_batch;
extern struct workqueue_struct *mm_percpu_wq;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif
extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];
static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
return migratetype == MIGRATE_HIGHATOMIC;
}
void setup_zone_pageset(struct zone *zone);
struct migration_target_control {
int nid;
nodemask_t *nmask;
gfp_t gfp_mask;
enum migrate_reason reason;
};
size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
struct folio *folio, loff_t fpos, size_t size);
#ifdef CONFIG_MMU
void __init vmalloc_init(void);
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
pgprot_t prot, struct page **pages, unsigned int page_shift);
unsigned int get_vm_area_page_order(struct vm_struct *vm);
#else
static inline void vmalloc_init(void)
{
}
static inline
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
pgprot_t prot, struct page **pages, unsigned int page_shift)
{
return -EINVAL;
}
#endif
int __must_check __vmap_pages_range_noflush(unsigned long addr,
unsigned long end, pgprot_t prot,
struct page **pages, unsigned int page_shift);
void vunmap_range_noflush(unsigned long start, unsigned long end);
void __vunmap_range_noflush(unsigned long start, unsigned long end);
int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
unsigned long addr, int *flags, bool writable,
int *last_cpupid);
void free_zone_device_folio(struct folio *folio);
int migrate_device_coherent_folio(struct folio *folio);
struct vm_struct *__get_vm_area_node(unsigned long size,
unsigned long align, unsigned long shift,
unsigned long vm_flags, unsigned long start,
unsigned long end, int node, gfp_t gfp_mask,
const void *caller);
int __must_check try_grab_folio(struct folio *folio, int refs,
unsigned int flags);
void touch_pud(struct vm_area_struct *vma, unsigned long addr,
pud_t *pud, bool write);
void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, bool write);
static inline int get_order_from_str(const char *size_str,
unsigned long valid_orders)
{
unsigned long size;
char *endptr;
int order;
size = memparse(size_str, &endptr);
if (!is_power_of_2(size))
return -EINVAL;
order = get_order(size);
if (BIT(order) & ~valid_orders)
return -EINVAL;
return order;
}
enum {
	/* mark page accessed */
	FOLL_TOUCH = 1 << 16,
	/* a retry, previous pass started an IO */
	FOLL_TRIED = 1 << 17,
	/* we are working on non-current tsk/mm */
	FOLL_REMOTE = 1 << 18,
	/* pages must be released via unpin_user_page() */
	FOLL_PIN = 1 << 19,
	/* gup_fast: prevent fall-back to slow gup */
	FOLL_FAST_ONLY = 1 << 20,
	/* allow unlocking the mmap lock */
	FOLL_UNLOCKABLE = 1 << 21,
	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
	FOLL_MADV_POPULATE = 1 << 22,
};
#define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
FOLL_MADV_POPULATE)
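/*
 * Indicates for which pages that are write-protected in the page table,
 * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
 * GUP pin will remain consistent with the pages mapped into the page tables
 * of the MM.
 *
 * Must be called with the (sub)page that's actually referenced via the
 * page table entry, which might not necessarily be the head page for a
 * folio.
 */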
static inline bool gup_must_unshare(struct vm_area_struct *vma,
unsigned int flags, struct page *page)
{
if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
return false;
if (!PageAnon(page)) {
if (!(flags & FOLL_LONGTERM))
return false;
if (!vma)
return true;
return is_cow_mapping(vma->vm_flags);
}
if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
smp_rmb();
return !PageAnonExclusive(page);
}
extern bool mirrored_kernelcore;
bool memblock_has_mirror(void);
void memblock_free_all(void);
static __always_inline void vma_set_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
pgoff_t pgoff)
{
vma->vm_start = start;
vma->vm_end = end;
vma->vm_pgoff = pgoff;
}
static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
return false;
return !(vma->vm_flags & VM_SOFTDIRTY);
}
static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct *vma, pmd_t pmd)
{
return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd);
}
static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte)
{
return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
}
void __meminit __init_single_page(struct page *page, unsigned long pfn,
unsigned long zone, int nid);
void __meminit __init_page_from_nid(unsigned long pfn, int nid);
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
int priority);
#ifdef CONFIG_SHRINKER_DEBUG
static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
struct shrinker *shrinker, const char *fmt, va_list ap)
{
shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
return shrinker->name ? 0 : -ENOMEM;
}
static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
kfree_const(shrinker->name);
shrinker->name = NULL;
}
extern int shrinker_debugfs_add(struct shrinker *shrinker);
extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
int *debugfs_id);
extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
int debugfs_id);
#else
static inline int shrinker_debugfs_add(struct shrinker *shrinker)
{
return 0;
}
static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
const char *fmt, va_list ap)
{
return 0;
}
static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
}
static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
int *debugfs_id)
{
*debugfs_id = -1;
return NULL;
}
static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
int debugfs_id)
{
}
#endif /* CONFIG_SHRINKER_DEBUG */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
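/*
 * Arrange for xarray nodes of this mapping to be tracked by the workingset
 * shadow-node machinery: node updates are reported to
 * workingset_update_node() and nodes are placed on the shadow_nodes
 * list_lru. DAX and shmem mappings opt out of this tracking.
 */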
#define mapping_set_update(xas, mapping) do { \
if (!dax_mapping(mapping) && !shmem_mapping(mapping)) { \
xas_set_update(xas, workingset_update_node); \
xas_set_lru(xas, &shadow_nodes); \
} \
} while (0)
unsigned long move_page_tables(struct pagetable_move_control *pmc);
#ifdef CONFIG_UNACCEPTED_MEMORY
void accept_page(struct page *page);
#else
static inline void accept_page(struct page *page)
{
}
#endif
int walk_page_range_mm(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private);
int walk_page_range_debug(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
pgd_t *pgd, void *private);
bool try_get_and_clear_pmd(struct mm_struct *mm, pmd_t *pmd, pmd_t *pmdval);
void free_pte(struct mm_struct *mm, unsigned long addr, struct mmu_gather *tlb,
pmd_t pmdval);
void try_to_free_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr,
struct mmu_gather *tlb);
#ifdef CONFIG_PT_RECLAIM
bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
struct zap_details *details);
#else
static inline bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
struct zap_details *details)
{
return false;
}
#endif
void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm);
int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm);
#endif /* __MM_INTERNAL_H */