GitHub Repository: awilliam/linux-vfio
Path: blob/master/mm/mincore.c
/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

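/*
 * Fill the residency vector for a hugetlbfs VMA.  Residency is decided
 * once per huge page, and the same byte value is then written for every
 * PAGE_SIZE-sized page up to the next huge page boundary.
 */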
static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				unsigned char *vec)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct hstate *h;

	h = hstate_vma(vma);
	while (1) {
		unsigned char present;
		pte_t *ptep;
		/*
		 * Huge pages are always in RAM for now, but
		 * theoretically it needs to be checked.
		 */
		ptep = huge_pte_offset(current->mm,
				       addr & huge_page_mask(h));
		present = ptep && !huge_pte_none(huge_ptep_get(ptep));
		while (1) {
			*vec = present;
			vec++;
			addr += PAGE_SIZE;
			if (addr == end)
				return;
			/* check hugepage border */
			if (!(addr & ~huge_page_mask(h)))
				break;
		}
	}
#else
	BUG();
#endif
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 *
	 * However when tmpfs moves the page from pagecache and into swapcache,
	 * it is still in core, but the find_get_page below won't find it.
	 * No big deal, but make a note of it.
	 */
	page = find_get_page(mapping, pgoff);
	if (page) {
		present = PageUptodate(page);
		page_cache_release(page);
	}

	return present;
}

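/*
 * Fill the residency vector for a range that has no page table entries.
 * File-backed pages may still be resident in the page cache, so they are
 * looked up there; anonymous pages without a pte cannot be resident and
 * are reported as absent.
 */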
static void mincore_unmapped_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
}

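/*
 * Walk the ptes covered by one pmd entry and classify each page:
 * present ptes are resident, file ptes (nonlinear mappings) are looked
 * up in the backing file's page cache, swap ptes are looked up in the
 * swap cache, and empty ptes fall back to mincore_unmapped_range().
 */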
static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		pte_t pte = *ptep;
		pgoff_t pgoff;

		next = addr + PAGE_SIZE;
		if (pte_none(pte))
			mincore_unmapped_range(vma, addr, next, vec);
		else if (pte_present(pte))
			*vec = 1;
		else if (pte_file(pte)) {
			pgoff = pte_to_pgoff(pte);
			*vec = mincore_page(vma->vm_file->f_mapping, pgoff);
		} else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (is_migration_entry(entry)) {
				/* migration entries are always uptodate */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				pgoff = entry.val;
				*vec = mincore_page(&swapper_space, pgoff);
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	} while (ptep++, addr = next, addr != end);
	pte_unmap_unlock(ptep - 1, ptl);
}

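/*
 * The three helpers below walk the page tables top-down (pgd -> pud ->
 * pmd -> pte) for one VMA.  At the pmd level a transparent huge page can
 * be resolved in a single step via mincore_huge_pmd(); levels with no
 * page tables are handed to mincore_unmapped_range().
 */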
static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
				vec += (next - addr) >> PAGE_SHIFT;
				continue;
			}
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pte_range(vma, pmd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pmd++, addr = next, addr != end);
}

static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pmd_range(vma, pud, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pud++, addr = next, addr != end);
}

static void mincore_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pud_range(vma, pgd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pgd++, addr = next, addr != end);
}

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;

	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;

	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));

	if (is_vm_hugetlb_page(vma)) {
		mincore_hugetlb_page_range(vma, addr, end, vec);
		return (end - addr) >> PAGE_SHIFT;
	}

	end = pmd_addr_end(addr, end);

	if (is_vm_hugetlb_page(vma))
		mincore_hugetlb_page_range(vma, addr, end, vec);
	else
		mincore_page_range(vma, addr, end, vec);

	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes. The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information. Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_CACHE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok(VERIFY_READ, (void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_CACHE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (len & ~PAGE_MASK) != 0;

	if (!access_ok(VERIFY_WRITE, vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		down_read(&current->mm->mmap_sem);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		up_read(&current->mm->mmap_sem);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}
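For reference, below is a minimal userspace sketch of how the interface documented above SYSCALL_DEFINE3 is typically used: map a file, size the status vector at one byte per page, and call mincore(2), reading only the least significant bit of each byte. It is an illustration, not part of the kernel sources; the fallback file name, error handling, and output format are arbitrary choices.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	/* Any readable, non-empty file works; /etc/hostname is just an example. */
	const char *path = argc > 1 ? argv[1] : "/etc/hostname";
	long page = sysconf(_SC_PAGESIZE);
	struct stat st;
	int fd = open(path, O_RDONLY);

	if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0)
		return 1;

	/* Map the file; the mapping itself need not be touched for mincore(). */
	void *addr = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED)
		return 1;

	/* One status byte per page of the mapping. */
	size_t pages = (st.st_size + page - 1) / page;
	unsigned char *vec = malloc(pages);
	if (!vec)
		return 1;

	if (mincore(addr, st.st_size, vec) == 0) {
		size_t resident = 0, i;

		for (i = 0; i < pages; i++)
			resident += vec[i] & 1;	/* only the LSB is meaningful */
		printf("%zu of %zu pages resident\n", resident, pages);
	}

	free(vec);
	munmap(addr, st.st_size);
	close(fd);
	return 0;
}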