// SPDX-License-Identifier: GPL-2.0
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * See Documentation/mm/vmemmap_dedup.rst
 */
#define pr_fmt(fmt) "HugeTLB: " fmt

#include <linux/pgtable.h>
#include <linux/moduleparam.h>
#include <linux/bootmem_info.h>
#include <linux/mmdebug.h>
#include <linux/pagewalk.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "hugetlb_vmemmap.h"
struct vmemmap_remap_walk {
void (*remap_pte)(pte_t *pte, unsigned long addr,
struct vmemmap_remap_walk *walk);
unsigned long nr_walked;
struct page *reuse_page;
unsigned long reuse_addr;
struct list_head *vmemmap_pages;
/* Skip the TLB flush when we split the PMD */
#define VMEMMAP_SPLIT_NO_TLB_FLUSH BIT(0)
/* Skip the TLB flush when we remap the PTE */
#define VMEMMAP_REMAP_NO_TLB_FLUSH BIT(1)
/* synchronize_rcu() to avoid writes from page_ref_add_unless() */
#define VMEMMAP_SYNCHRONIZE_RCU BIT(2)
unsigned long flags;
};
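
/*
 * Split the leaf PMD mapping @head into PTE-level mappings.  The new PTE
 * page mirrors the existing PMD mapping and is installed under
 * init_mm.page_table_lock; if someone else split the PMD in the meantime,
 * the PTE page is freed again.
 */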
static int vmemmap_split_pmd(pmd_t *pmd, struct page *head, unsigned long start,
struct vmemmap_remap_walk *walk)
{
pmd_t __pmd;
int i;
unsigned long addr = start;
pte_t *pgtable;
pgtable = pte_alloc_one_kernel(&init_mm);
if (!pgtable)
return -ENOMEM;
pmd_populate_kernel(&init_mm, &__pmd, pgtable);
for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
pte_t entry, *pte;
pgprot_t pgprot = PAGE_KERNEL;
entry = mk_pte(head + i, pgprot);
pte = pte_offset_kernel(&__pmd, addr);
set_pte_at(&init_mm, addr, pte, entry);
}
spin_lock(&init_mm.page_table_lock);
if (likely(pmd_leaf(*pmd))) {
if (!PageReserved(head))
split_page(head, get_order(PMD_SIZE));
		/* Make pte visible before pmd. See comment in pmd_install(). */
		smp_wmb();
pmd_populate_kernel(&init_mm, pmd, pgtable);
if (!(walk->flags & VMEMMAP_SPLIT_NO_TLB_FLUSH))
flush_tlb_kernel_range(start, start + PMD_SIZE);
} else {
pte_free_kernel(&init_mm, pgtable);
}
spin_unlock(&init_mm.page_table_lock);
return 0;
}
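
/*
 * Called for each PMD in the walked range.  A leaf PMD is split so the
 * walk can proceed at PTE level; if only splitting was requested
 * (remap_pte == NULL), the PTE level is skipped entirely.
 */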
static int vmemmap_pmd_entry(pmd_t *pmd, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
int ret = 0;
struct page *head;
struct vmemmap_remap_walk *vmemmap_walk = walk->private;
	/* Only splitting, not remapping the vmemmap pages. */
	if (!vmemmap_walk->remap_pte)
		walk->action = ACTION_CONTINUE;

	spin_lock(&init_mm.page_table_lock);
	head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL;
	/*
	 * Due to HugeTLB alignment requirements and the vmemmap pages being
	 * at the start of the hotplugged memory region in the
	 * memory_hotplug.memmap_on_memory case, checking the vmemmap page
	 * associated with the first walked vmemmap page is sufficient to
	 * decide whether the range is self-hosted.
	 */
	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) && unlikely(!vmemmap_walk->nr_walked)) {
struct page *page = head ? head + pte_index(addr) :
pte_page(ptep_get(pte_offset_kernel(pmd, addr)));
if (PageVmemmapSelfHosted(page))
ret = -ENOTSUPP;
}
spin_unlock(&init_mm.page_table_lock);
if (!head || ret)
return ret;
return vmemmap_split_pmd(pmd, head, addr & PMD_MASK, vmemmap_walk);
}
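
/*
 * Called for each PTE in the walked range.  The very first PTE belongs to
 * the reuse page (the walk starts at @reuse_addr); it is only recorded,
 * every following entry is handed to the remap_pte callback.
 */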
static int vmemmap_pte_entry(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
struct vmemmap_remap_walk *vmemmap_walk = walk->private;
if (!vmemmap_walk->reuse_page)
vmemmap_walk->reuse_page = pte_page(ptep_get(pte));
else
vmemmap_walk->remap_pte(pte, addr, vmemmap_walk);
vmemmap_walk->nr_walked++;
return 0;
}
static const struct mm_walk_ops vmemmap_remap_ops = {
.pmd_entry = vmemmap_pmd_entry,
.pte_entry = vmemmap_pte_entry,
};
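
/*
 * Walk the vmemmap virtual address range [@start, @end) and apply
 * @walk->remap_pte to every PTE.  The caller must pass a page-aligned
 * range; the TLB is flushed here unless VMEMMAP_REMAP_NO_TLB_FLUSH
 * defers that to the caller.
 */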
static int vmemmap_remap_range(unsigned long start, unsigned long end,
struct vmemmap_remap_walk *walk)
{
int ret;
VM_BUG_ON(!PAGE_ALIGNED(start | end));
mmap_read_lock(&init_mm);
ret = walk_kernel_page_table_range(start, end, &vmemmap_remap_ops,
NULL, walk);
mmap_read_unlock(&init_mm);
if (ret)
return ret;
if (walk->remap_pte && !(walk->flags & VMEMMAP_REMAP_NO_TLB_FLUSH))
flush_tlb_kernel_range(start, end);
return 0;
}
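
/*
 * Free a vmemmap page. A vmemmap page can be allocated from the memblock
 * allocator or buddy allocator. If the PG_reserved flag is set, it means
 * that it was allocated from the memblock allocator and must be freed back
 * to it; the memmap accounting is adjusted either way.
 */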
static inline void free_vmemmap_page(struct page *page)
{
if (PageReserved(page)) {
memmap_boot_pages_add(-1);
free_bootmem_page(page);
} else {
memmap_pages_add(-1);
__free_page(page);
}
}
static void free_vmemmap_page_list(struct list_head *list)
{
struct page *page, *next;
list_for_each_entry_safe(page, next, list, lru)
free_vmemmap_page(page);
}
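
/*
 * Remap one vmemmap PTE to @walk->reuse_page and queue the page it
 * previously mapped on @walk->vmemmap_pages so the caller can free it.
 */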
static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
struct vmemmap_remap_walk *walk)
{
	/*
	 * Remap the tail pages as read-only to catch illegal write operation
	 * to the tail pages.
	 */
	pgprot_t pgprot = PAGE_KERNEL_RO;
	struct page *page = pte_page(ptep_get(pte));
	pte_t entry;

	/* Remapping the head page requires r/w */
	if (unlikely(addr == walk->reuse_addr)) {
		pgprot = PAGE_KERNEL;
		list_del(&walk->reuse_page->lru);

		/*
		 * Makes sure that preceding stores to the page contents from
		 * vmemmap_remap_free() become visible before the set_pte_at()
		 * write.
		 */
		smp_wmb();
	}
entry = mk_pte(walk->reuse_page, pgprot);
list_add(&page->lru, walk->vmemmap_pages);
set_pte_at(&init_mm, addr, pte, entry);
}
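
/*
 * How many struct page structs need to be reset. When we reuse the head
 * struct page, the special metadata (e.g. page->flags or page->mapping)
 * cannot be copied to the tail struct page structs. The invalid value will
 * be checked in free_tail_page_prepare(). Resetting the first entries from
 * the clean tail template that follows them avoids "corrupted mapping in
 * tail page" reports.
 */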
#define NR_RESET_STRUCT_PAGE 4
static inline void reset_struct_pages(struct page *start)
{
struct page *from = start + NR_RESET_STRUCT_PAGE;
BUILD_BUG_ON(NR_RESET_STRUCT_PAGE * 2 > PAGE_SIZE / sizeof(struct page));
memcpy(start, from, sizeof(*from) * NR_RESET_STRUCT_PAGE);
}
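
/*
 * Restore one vmemmap PTE: take a freshly allocated page from
 * @walk->vmemmap_pages, initialize it from the reuse page, reset the
 * struct pages that carried head-page metadata and map it back in.
 */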
static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
struct vmemmap_remap_walk *walk)
{
pgprot_t pgprot = PAGE_KERNEL;
struct page *page;
void *to;
BUG_ON(pte_page(ptep_get(pte)) != walk->reuse_page);
page = list_first_entry(walk->vmemmap_pages, struct page, lru);
list_del(&page->lru);
to = page_to_virt(page);
copy_page(to, (void *)walk->reuse_addr);
reset_struct_pages(to);
	/*
	 * Makes sure that preceding stores to the page contents become visible
	 * before the set_pte_at() write.
	 */
	smp_wmb();
set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}
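
/*
 * Split the huge PMDs backing the vmemmap virtual address range
 * [@start, @end) into PTE mappings without remapping anything.  The TLB
 * flush is deferred (VMEMMAP_SPLIT_NO_TLB_FLUSH) so callers can batch it.
 */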
static int vmemmap_remap_split(unsigned long start, unsigned long end,
unsigned long reuse)
{
struct vmemmap_remap_walk walk = {
.remap_pte = NULL,
.flags = VMEMMAP_SPLIT_NO_TLB_FLUSH,
};
	/* See the comment before the BUG_ON() in vmemmap_remap_free(). */
	BUG_ON(start - reuse != PAGE_SIZE);
return vmemmap_remap_range(reuse, end, &walk);
}
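
/**
 * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
 *			to the page which @reuse is mapped to, then free vmemmap
 *			which the range are mapped to.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 * @vmemmap_pages:	list to deposit vmemmap pages to be freed.  It is callers
 *		responsibility to free pages.
 * @flags:	modifications to vmemmap_remap_walk flags
 *
 * Return: %0 on success, negative error code otherwise.
 */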
static int vmemmap_remap_free(unsigned long start, unsigned long end,
unsigned long reuse,
struct list_head *vmemmap_pages,
unsigned long flags)
{
int ret;
struct vmemmap_remap_walk walk = {
.remap_pte = vmemmap_remap_pte,
.reuse_addr = reuse,
.vmemmap_pages = vmemmap_pages,
.flags = flags,
};
int nid = page_to_nid((struct page *)reuse);
gfp_t gfp_mask = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
	/*
	 * Allocate a new head vmemmap page to avoid breaking a contiguous
	 * block of struct page memory when freeing it back to the page
	 * allocator in free_vmemmap_page_list(). This keeps the likely
	 * contiguous struct page backing memory contiguous, allowing for
	 * more allocations of hugepages. Fall back to the currently mapped
	 * head page if the allocation fails.
	 */
	walk.reuse_page = alloc_pages_node(nid, gfp_mask, 0);
	if (walk.reuse_page) {
		copy_page(page_to_virt(walk.reuse_page),
			  (void *)walk.reuse_addr);
		list_add(&walk.reuse_page->lru, vmemmap_pages);
		memmap_pages_add(1);
	}

	/*
	 * In order to make the page table walk most efficient for huge pages,
	 * the walk follows these rules (see vmemmap_pte_entry()):
	 *
	 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
	 *   must be contiguous.
	 * - The @reuse address is part of the range [@reuse, @end) passed to
	 *   vmemmap_remap_range(), and it is the first address of that range.
	 */
	BUG_ON(start - reuse != PAGE_SIZE);
ret = vmemmap_remap_range(reuse, end, &walk);
	if (ret && walk.nr_walked) {
		end = reuse + walk.nr_walked * PAGE_SIZE;
		/*
		 * vmemmap_pages contains pages from the previous
		 * vmemmap_remap_range call which failed.  These
		 * are pages which were removed from the vmemmap.
		 * They will be restored in the following call.
		 */
		walk = (struct vmemmap_remap_walk) {
.remap_pte = vmemmap_restore_pte,
.reuse_addr = reuse,
.vmemmap_pages = vmemmap_pages,
.flags = 0,
};
vmemmap_remap_range(reuse, end, &walk);
}
return ret;
}
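
/*
 * Allocate one page for every base page of the vmemmap range
 * [@start, @end) on the node the range belongs to, collecting them on
 * @list.  On failure, everything allocated so far is freed again.
 */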
static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
struct list_head *list)
{
gfp_t gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
int nid = page_to_nid((struct page *)start);
struct page *page, *next;
int i;
for (i = 0; i < nr_pages; i++) {
page = alloc_pages_node(nid, gfp_mask, 0);
if (!page)
goto out;
list_add(&page->lru, list);
}
memmap_pages_add(nr_pages);
return 0;
out:
list_for_each_entry_safe(page, next, list, lru)
__free_page(page);
return -ENOMEM;
}
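
/**
 * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, end)
 *			 to the page which is from the @vmemmap_pages
 *			 respectively.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 * @flags:	modifications to vmemmap_remap_walk flags
 *
 * Return: %0 on success, negative error code otherwise.
 */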
static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
unsigned long reuse, unsigned long flags)
{
LIST_HEAD(vmemmap_pages);
struct vmemmap_remap_walk walk = {
.remap_pte = vmemmap_restore_pte,
.reuse_addr = reuse,
.vmemmap_pages = &vmemmap_pages,
.flags = flags,
};
	/* See the comment before the BUG_ON() in vmemmap_remap_free(). */
	BUG_ON(start - reuse != PAGE_SIZE);
if (alloc_vmemmap_page_list(start, end, &vmemmap_pages))
return -ENOMEM;
return vmemmap_remap_range(reuse, end, &walk);
}
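
/*
 * Bumped for every folio whose vmemmap is currently optimized;
 * page_fixed_fake_head() uses this key to skip the fake-head check when
 * no HVO folio exists.
 */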
DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
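
/*
 * Parsed with early_param() rather than core_param() so that the setting
 * is already visible when the early bootmem (pre-HVO) code below runs.
 */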
static int __init hugetlb_vmemmap_optimize_param(char *buf)
{
return kstrtobool(buf, &vmemmap_optimize_enabled);
}
early_param("hugetlb_free_vmemmap", hugetlb_vmemmap_optimize_param);
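
/*
 * Undo HVO for one folio: allocate fresh vmemmap pages for its struct
 * pages and restore the one-to-one mapping that existed before
 * optimization.
 */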
static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
struct folio *folio, unsigned long flags)
{
int ret;
unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
unsigned long vmemmap_reuse;
VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
VM_WARN_ON_ONCE_FOLIO(folio_ref_count(folio), folio);
if (!folio_test_hugetlb_vmemmap_optimized(folio))
return 0;
if (flags & VMEMMAP_SYNCHRONIZE_RCU)
synchronize_rcu();
	vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
	vmemmap_reuse = vmemmap_start;
	vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * The pages which the vmemmap virtual address range [@vmemmap_start,
	 * @vmemmap_end) are mapped to are freed to the buddy allocator, and
	 * the range is mapped to the page which @vmemmap_reuse is mapped to.
	 * When a HugeTLB page is freed to the buddy allocator, previously
	 * discarded vmemmap pages must be allocated and remapped.
	 */
	ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse, flags);
if (!ret) {
folio_clear_hugetlb_vmemmap_optimized(folio);
static_branch_dec(&hugetlb_optimize_vmemmap_key);
}
return ret;
}
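
/**
 * hugetlb_vmemmap_restore_folio - restore previously optimized (by
 *				hugetlb_vmemmap_optimize_folio()) vmemmap pages which
 *				will be reallocated and remapped.
 * @h:		struct hstate.
 * @folio:	the folio whose vmemmap pages will be restored.
 *
 * Return: %0 if @folio's vmemmap pages have been reallocated and remapped,
 * negative error code otherwise.
 */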
int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio)
{
return __hugetlb_vmemmap_restore_folio(h, folio, VMEMMAP_SYNCHRONIZE_RCU);
}
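
/**
 * hugetlb_vmemmap_restore_folios - restore vmemmap for every folio from the list.
 * @h:			hstate.
 * @folio_list:		list of folios.
 * @non_hvo_folios:	Output list of folios for which vmemmap exists.
 *
 * Return: number of folios for which vmemmap was restored, or an error code
 *		if an error was encountered restoring any vmemmap. If an error
 *		is encountered, we will attempt to restore vmemmap for as
 *		many folios as possible.
 */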
long hugetlb_vmemmap_restore_folios(const struct hstate *h,
struct list_head *folio_list,
struct list_head *non_hvo_folios)
{
struct folio *folio, *t_folio;
long restored = 0;
long ret = 0;
unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU;
list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
if (folio_test_hugetlb_vmemmap_optimized(folio)) {
ret = __hugetlb_vmemmap_restore_folio(h, folio, flags);
			/* only need to synchronize_rcu() once */
			flags &= ~VMEMMAP_SYNCHRONIZE_RCU;
if (ret)
break;
restored++;
}
		/* Add non-optimized folios to output list */
		list_move(&folio->lru, non_hvo_folios);
}
if (restored)
flush_tlb_all();
if (!ret)
ret = restored;
return ret;
}
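
/* Return true if this folio's vmemmap can and should be optimized. */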
static bool vmemmap_should_optimize_folio(const struct hstate *h, struct folio *folio)
{
if (folio_test_hugetlb_vmemmap_optimized(folio))
return false;
if (!READ_ONCE(vmemmap_optimize_enabled))
return false;
if (!hugetlb_vmemmap_optimizable(h))
return false;
return true;
}
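
/*
 * Optimize one folio's vmemmap: remap its tail struct pages to the reuse
 * page and deposit the freed vmemmap pages on @vmemmap_pages.  The caller
 * frees them, which allows batching across folios.
 */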
static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h,
struct folio *folio,
struct list_head *vmemmap_pages,
unsigned long flags)
{
int ret = 0;
unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
unsigned long vmemmap_reuse;
VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
VM_WARN_ON_ONCE_FOLIO(folio_ref_count(folio), folio);
if (!vmemmap_should_optimize_folio(h, folio))
return ret;
	static_branch_inc(&hugetlb_optimize_vmemmap_key);

	if (flags & VMEMMAP_SYNCHRONIZE_RCU)
		synchronize_rcu();
	/*
	 * Very Subtle
	 * If VMEMMAP_REMAP_NO_TLB_FLUSH is set, TLB flushing is not performed
	 * immediately after remapping.  As a result, subsequent accesses
	 * and modifications to struct pages associated with the hugetlb
	 * page could be to the OLD struct pages.  Set the vmemmap optimized
	 * flag here so that it is copied to the new head struct page.
	 * This keeps the old and new struct pages in sync.
	 * If there is an error during optimization, we will immediately FLUSH
	 * the TLB and clear the flag below.
	 */
	folio_set_hugetlb_vmemmap_optimized(folio);
	vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
	vmemmap_reuse = vmemmap_start;
	vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * Remap the vmemmap virtual address range [@vmemmap_start, @vmemmap_end)
	 * to the page which @vmemmap_reuse is mapped to.  The pages which
	 * previously mapped the range are added to @vmemmap_pages so the
	 * caller can free them after the TLB has been flushed.
	 */
	ret = vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse,
				 vmemmap_pages, flags);
if (ret) {
static_branch_dec(&hugetlb_optimize_vmemmap_key);
folio_clear_hugetlb_vmemmap_optimized(folio);
}
return ret;
}
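
/**
 * hugetlb_vmemmap_optimize_folio - optimize @folio's vmemmap pages.
 * @h:		struct hstate.
 * @folio:	the folio whose vmemmap pages will be optimized.
 *
 * This function only tries to optimize @folio's vmemmap pages and does not
 * guarantee that the optimization will succeed after it returns. The caller
 * can use folio_test_hugetlb_vmemmap_optimized(@folio) to detect if @folio's
 * vmemmap pages have been optimized.
 */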
void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
{
LIST_HEAD(vmemmap_pages);
__hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, VMEMMAP_SYNCHRONIZE_RCU);
free_vmemmap_page_list(&vmemmap_pages);
}
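
/*
 * Pre-split the vmemmap PMDs of @folio so that the later remap step only
 * has to touch PTEs; the TLB flush for the splits is left to the caller.
 */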
static int hugetlb_vmemmap_split_folio(const struct hstate *h, struct folio *folio)
{
unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
unsigned long vmemmap_reuse;
if (!vmemmap_should_optimize_folio(h, folio))
return 0;
vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
vmemmap_reuse = vmemmap_start;
vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE;
return vmemmap_remap_split(vmemmap_start, vmemmap_end, vmemmap_reuse);
}
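
/*
 * Optimize every folio on @folio_list.  All PMD splits are done up front
 * with a single TLB flush, then each folio is remapped with the flush
 * deferred to the end of the list.  @boot indicates the folios may have
 * been pre-HVO'd from the boot memory allocator.
 */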
static void __hugetlb_vmemmap_optimize_folios(struct hstate *h,
struct list_head *folio_list,
bool boot)
{
struct folio *folio;
int nr_to_optimize;
LIST_HEAD(vmemmap_pages);
unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU;
nr_to_optimize = 0;
list_for_each_entry(folio, folio_list, lru) {
int ret;
unsigned long spfn, epfn;
		/*
		 * Already optimized at boot (pre-HVO): write-protect the
		 * aliased tail page structs, register the bootmem memmap
		 * and bump the static key, then skip the runtime
		 * optimization below.
		 */
		if (boot && folio_test_hugetlb_vmemmap_optimized(folio)) {
spfn = (unsigned long)&folio->page;
epfn = spfn + pages_per_huge_page(h);
vmemmap_wrprotect_hvo(spfn, epfn, folio_nid(folio),
HUGETLB_VMEMMAP_RESERVE_SIZE);
register_page_bootmem_memmap(pfn_to_section_nr(spfn),
&folio->page,
HUGETLB_VMEMMAP_RESERVE_SIZE);
static_branch_inc(&hugetlb_optimize_vmemmap_key);
continue;
}
nr_to_optimize++;
		ret = hugetlb_vmemmap_split_folio(h, folio);

		/*
		 * Splitting the PMD requires allocating a page, so fail
		 * early once we encounter the first OOM. There is no point
		 * in retrying, as it can be done dynamically on remap with
		 * the memory we get back from the vmemmap deduplication.
		 */
		if (ret == -ENOMEM)
			break;
}
	if (!nr_to_optimize)
		/*
		 * All pre-HVO folios, nothing left to do. But we still have
		 * to flush the TLB to cover the write protection done on
		 * the pre-HVO folios above.
		 */
		goto out;

	flush_tlb_all();
list_for_each_entry(folio, folio_list, lru) {
int ret;
		ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, flags);
		/* only need to synchronize_rcu() once */
		flags &= ~VMEMMAP_SYNCHRONIZE_RCU;

		/*
		 * Pages to be freed may have been accumulated.  If we
		 * encounter an ENOMEM, free what we have and try again.
		 * This can occur in the case that both splitting fails
		 * halfway and head page allocation also failed. In this
		 * case __hugetlb_vmemmap_optimize_folio() would free memory,
		 * allowing more vmemmap remaps to occur.
		 */
		if (ret == -ENOMEM && !list_empty(&vmemmap_pages)) {
			flush_tlb_all();
			free_vmemmap_page_list(&vmemmap_pages);
			INIT_LIST_HEAD(&vmemmap_pages);
			__hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, flags);
		}
}
out:
	/* The remap loop above deferred its TLB flushes; do them all here. */
	flush_tlb_all();
free_vmemmap_page_list(&vmemmap_pages);
}
void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
{
__hugetlb_vmemmap_optimize_folios(h, folio_list, false);
}
void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, struct list_head *folio_list)
{
__hugetlb_vmemmap_optimize_folios(h, folio_list, true);
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
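/*
 * Return true if a bootmem huge page can be pre-HVO'd: HVO must be
 * enabled and the page must satisfy the alignment constraints checked
 * below.
 */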
static bool vmemmap_should_optimize_bootmem_page(struct huge_bootmem_page *m)
{
unsigned long section_size, psize, pmd_vmemmap_size;
phys_addr_t paddr;
if (!READ_ONCE(vmemmap_optimize_enabled))
return false;
if (!hugetlb_vmemmap_optimizable(m->hstate))
return false;
psize = huge_page_size(m->hstate);
paddr = virt_to_phys(m);
	/*
	 * Pre-HVO only works if the bootmem huge page is aligned to the
	 * section size.
	 */
	section_size = (1UL << PA_SECTION_SHIFT);
	if (!IS_ALIGNED(paddr, section_size) ||
	    !IS_ALIGNED(psize, section_size))
		return false;

	/*
	 * The pre-HVO code does not deal with splitting PMDs, so the
	 * bootmem page must be aligned to the number of base pages that
	 * can be mapped with one vmemmap PMD.
	 */
	pmd_vmemmap_size = (PMD_SIZE / (sizeof(struct page))) << PAGE_SHIFT;
	if (!IS_ALIGNED(paddr, pmd_vmemmap_size) ||
	    !IS_ALIGNED(psize, pmd_vmemmap_size))
		return false;
return true;
}
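
/*
 * Pre-HVO (done before sparse_init()): initialize the memmap for every
 * eligible bootmem huge page on @nid directly in its optimized form and
 * mark the covered sections as preinitialized.
 */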
void __init hugetlb_vmemmap_init_early(int nid)
{
unsigned long psize, paddr, section_size;
unsigned long ns, i, pnum, pfn, nr_pages;
unsigned long start, end;
struct huge_bootmem_page *m = NULL;
void *map;
if (!hugetlb_bootmem_allocated())
return;
if (!READ_ONCE(vmemmap_optimize_enabled))
return;
section_size = (1UL << PA_SECTION_SHIFT);
list_for_each_entry(m, &huge_boot_pages[nid], list) {
if (!vmemmap_should_optimize_bootmem_page(m))
continue;
nr_pages = pages_per_huge_page(m->hstate);
psize = nr_pages << PAGE_SHIFT;
paddr = virt_to_phys(m);
pfn = PHYS_PFN(paddr);
map = pfn_to_page(pfn);
start = (unsigned long)map;
end = start + nr_pages * sizeof(struct page);
		/*
		 * Initialize the memmap for this page HVO-style: only
		 * HUGETLB_VMEMMAP_RESERVE_SIZE bytes of struct pages are
		 * backed by real memory, the rest of the range is mapped
		 * to the head vmemmap page.
		 */
		if (vmemmap_populate_hvo(start, end, nid,
					HUGETLB_VMEMMAP_RESERVE_SIZE) < 0)
			continue;

		memmap_boot_pages_add(HUGETLB_VMEMMAP_RESERVE_SIZE / PAGE_SIZE);

		/*
		 * Mark every section covered by this huge page as
		 * preinitialized, so that its memmap is not populated
		 * again later.
		 */
		pnum = pfn_to_section_nr(pfn);
		ns = psize / section_size;
		for (i = 0; i < ns; i++) {
			sparse_init_early_section(nid, map, pnum,
						  SECTION_IS_VMEMMAP_PREINIT);
			map += section_map_size();
			pnum++;
		}
m->flags |= HUGE_BOOTMEM_HVO;
}
}
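
/*
 * Late-boot pass over the pre-HVO'd bootmem pages of @nid: pages whose
 * zones turn out to be invalid get their HVO undone and their memory
 * returned to memblock; valid ones are marked so the zone check is not
 * repeated later.
 */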
void __init hugetlb_vmemmap_init_late(int nid)
{
struct huge_bootmem_page *m, *tm;
unsigned long phys, nr_pages, start, end;
unsigned long pfn, nr_mmap;
struct hstate *h;
void *map;
if (!hugetlb_bootmem_allocated())
return;
if (!READ_ONCE(vmemmap_optimize_enabled))
return;
list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) {
if (!(m->flags & HUGE_BOOTMEM_HVO))
continue;
phys = virt_to_phys(m);
h = m->hstate;
pfn = PHYS_PFN(phys);
nr_pages = pages_per_huge_page(h);
		if (!hugetlb_bootmem_page_zones_valid(nid, m)) {
			/*
			 * The bootmem page does not fit within zone
			 * boundaries, so it cannot be used as a huge page.
			 * Undo the pre-HVO remapping, fix up the memmap
			 * accounting and give the memory back to memblock.
			 */
			list_del(&m->list);

			map = pfn_to_page(pfn);
			start = (unsigned long)map;
			end = start + nr_pages * sizeof(struct page);

			vmemmap_undo_hvo(start, end, nid,
					 HUGETLB_VMEMMAP_RESERVE_SIZE);
			nr_mmap = end - start - HUGETLB_VMEMMAP_RESERVE_SIZE;
			memmap_boot_pages_add(DIV_ROUND_UP(nr_mmap, PAGE_SIZE));

			memblock_phys_free(phys, huge_page_size(h));
			continue;
		}

		m->flags |= HUGE_BOOTMEM_ZONES_VALID;
}
}
#endif
static const struct ctl_table hugetlb_vmemmap_sysctls[] = {
{
.procname = "hugetlb_optimize_vmemmap",
.data = &vmemmap_optimize_enabled,
.maxlen = sizeof(vmemmap_optimize_enabled),
.mode = 0644,
.proc_handler = proc_dobool,
},
};
static int __init hugetlb_vmemmap_init(void)
{
const struct hstate *h;
	/* HUGETLB_VMEMMAP_RESERVE_SIZE should cover all used struct pages */
	BUILD_BUG_ON(__NR_USED_SUBPAGE > HUGETLB_VMEMMAP_RESERVE_PAGES);
for_each_hstate(h) {
if (hugetlb_vmemmap_optimizable(h)) {
register_sysctl_init("vm", hugetlb_vmemmap_sysctls);
break;
}
}
return 0;
}
late_initcall(hugetlb_vmemmap_init);