GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/ia64/mm/hugetlbpage.c

/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <[email protected]>
 * Copyright (C) 2003-2004 Ken Chen <[email protected]>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);

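/*
 * On ia64, hugetlb mappings live in their own virtual region (RGN_HPAGE).
 * htlbpage_to_page() scales a huge-page address down by
 * HPAGE_SIZE/PAGE_SIZE so that the generic multi-level page table walk
 * can be reused at normal PAGE_SIZE granularity (see the comment in
 * hugetlb_free_pgd_range() below).
 */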
pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, taddr);
	}
	return pte;
}

pte_t *
huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}

	return pte;
}

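/*
 * ia64 does not share hugetlb page tables between processes, so there is
 * never anything to unshare; returning 0 tells the generic hugetlb code
 * that no entry was unshared.
 */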
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

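/*
 * Marking a huge pte only needs to set the present bit (_PAGE_P):
 * "hugeness" is implied by the mapping's address region on ia64, not
 * by a pte flag.
 */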
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * Don't actually need to do any preparation, but need to make sure
 * the address is in the right region.
 */
int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}

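/*
 * Resolve the struct page backing a huge-page address.  The pte maps the
 * whole huge page, so the offset within it (in PAGE_SIZE units) is added
 * to reach the exact constituent base page.
 */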
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}

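/*
 * Huge mappings are never marked at the pmd/pud level on ia64 (they are
 * recognized by their region number instead), so these predicates always
 * report false and follow_huge_pmd() is never reached.
 */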
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
	return NULL;
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */

	addr = htlbpage_to_page(addr);
	end = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}

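/*
 * First-fit search for a free, HPAGE_SIZE-aligned range inside the
 * hugetlb region: start at HPAGE_REGION_BASE (or the caller's aligned
 * hint) and walk the VMA list until a large-enough gap is found or
 * RGN_MAP_LIMIT is exceeded.
 */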
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;
	else
		addr = ALIGN(addr, HPAGE_SIZE);
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
			return -ENOMEM;
		if (!vmm || (addr + len) <= vmm->vm_start)
			return addr;
		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
	}
}

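/*
 * Parse the "hugepagesz=" early boot parameter; e.g. booting with
 * "hugepagesz=256M" would select 256 MB huge pages (assuming PAL reports
 * 256 MB as an insertable TLB page size on the machine).  The size must
 * be a power of two, larger than PAGE_SIZE, below the buddy allocator's
 * MAX_ORDER limit, and present in the PAL page-size mask (tr_pages).
 */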
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * Shouldn't happen, but just in case: fall back to a
		 * conservative mask of insertable page sizes.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot cpu already executed ia64_mmu_init() and set up the
	 * hugetlb region register using HPAGE_SHIFT_DEFAULT; override it
	 * here with the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}
early_param("hugepagesz", hugetlb_setup_sz);