GitHub Repository: torvalds/linux
Path: blob/master/io_uring/memmap.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <asm/shmparam.h>

#include "memmap.h"
#include "kbuf.h"
#include "rsrc.h"
#include "zcrx.h"

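/*
 * Allocate @size bytes as one physically contiguous block: pick the page
 * order covering @size, request a compound page when order > 0, and fill
 * @pages with a pointer to each constituent page. Returns the kernel
 * virtual address of the block, or an ERR_PTR() on failure.
 */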
static void *io_mem_alloc_compound(struct page **pages, int nr_pages,
                                   size_t size, gfp_t gfp)
{
        struct page *page;
        int i, order;

        order = get_order(size);
        if (order > MAX_PAGE_ORDER)
                return ERR_PTR(-ENOMEM);
        else if (order)
                gfp |= __GFP_COMP;

        page = alloc_pages(gfp, order);
        if (!page)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < nr_pages; i++)
                pages[i] = page + i;

        return page_address(page);
}

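/*
 * Pin the user memory range [@uaddr, @uaddr + @len) writable and for
 * long-term kernel use. Returns the array of pinned pages and stores the
 * page count in @npages, or an ERR_PTR() if the range overflows or cannot
 * be fully pinned.
 */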
struct page **io_pin_pages(unsigned long uaddr, unsigned long len, int *npages)
{
        unsigned long start, end, nr_pages;
        struct page **pages;
        int ret;

        if (check_add_overflow(uaddr, len, &end))
                return ERR_PTR(-EOVERFLOW);
        if (check_add_overflow(end, PAGE_SIZE - 1, &end))
                return ERR_PTR(-EOVERFLOW);

        end = end >> PAGE_SHIFT;
        start = uaddr >> PAGE_SHIFT;
        nr_pages = end - start;
        if (WARN_ON_ONCE(!nr_pages))
                return ERR_PTR(-EINVAL);
        if (WARN_ON_ONCE(nr_pages > INT_MAX))
                return ERR_PTR(-EOVERFLOW);

        pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return ERR_PTR(-ENOMEM);

        ret = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
                                  pages);
        /* success, mapped all pages */
        if (ret == nr_pages) {
                *npages = nr_pages;
                return pages;
        }

        /* partial map, or didn't map anything */
        if (ret >= 0) {
                /* if we did partial map, release any pages we did get */
                if (ret)
                        unpin_user_pages(pages, ret);
                ret = -EFAULT;
        }
        kvfree(pages);
        return ERR_PTR(ret);
}

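/*
 * Flags kept in io_mapped_region->flags, describing how the region's memory
 * was obtained and how it is mapped into the kernel.
 */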
enum {
        /* memory was vmap'ed for the kernel, freeing the region vunmap's it */
        IO_REGION_F_VMAP = 1,
        /* memory is provided by user and pinned by the kernel */
        IO_REGION_F_USER_PROVIDED = 2,
        /* only the first page in the array is ref'ed */
        IO_REGION_F_SINGLE_REF = 4,
};

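/*
 * Tear down a region: drop the page references (a single reference for
 * compound allocations, otherwise one per page), unpinning or releasing
 * them depending on how they were obtained, undo any vmap, unaccount the
 * memory and clear the region.
 */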
void io_free_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr)
{
        if (mr->pages) {
                long nr_refs = mr->nr_pages;

                if (mr->flags & IO_REGION_F_SINGLE_REF)
                        nr_refs = 1;

                if (mr->flags & IO_REGION_F_USER_PROVIDED)
                        unpin_user_pages(mr->pages, nr_refs);
                else
                        release_pages(mr->pages, nr_refs);

                kvfree(mr->pages);
        }
        if ((mr->flags & IO_REGION_F_VMAP) && mr->ptr)
                vunmap(mr->ptr);
        if (mr->nr_pages && ctx->user)
                __io_unaccount_mem(ctx->user, mr->nr_pages);

        memset(mr, 0, sizeof(*mr));
}

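/*
 * Set up a kernel virtual address for the region. If the pages coalesce
 * into a single non-highmem folio, page_address() is used directly;
 * otherwise the pages are vmap'ed.
 */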
static int io_region_init_ptr(struct io_mapped_region *mr)
{
        struct io_imu_folio_data ifd;
        void *ptr;

        if (io_check_coalesce_buffer(mr->pages, mr->nr_pages, &ifd)) {
                if (ifd.nr_folios == 1 && !PageHighMem(mr->pages[0])) {
                        mr->ptr = page_address(mr->pages[0]);
                        return 0;
                }
        }
        ptr = vmap(mr->pages, mr->nr_pages, VM_MAP, PAGE_KERNEL);
        if (!ptr)
                return -ENOMEM;

        mr->ptr = ptr;
        mr->flags |= IO_REGION_F_VMAP;
        return 0;
}

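/*
 * Back the region with user-provided memory: pin reg->user_addr for the
 * region's full size and mark the pages as user provided.
 */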
static int io_region_pin_pages(struct io_ring_ctx *ctx,
                               struct io_mapped_region *mr,
                               struct io_uring_region_desc *reg)
{
        unsigned long size = mr->nr_pages << PAGE_SHIFT;
        struct page **pages;
        int nr_pages;

        pages = io_pin_pages(reg->user_addr, size, &nr_pages);
        if (IS_ERR(pages))
                return PTR_ERR(pages);
        if (WARN_ON_ONCE(nr_pages != mr->nr_pages))
                return -EFAULT;

        mr->pages = pages;
        mr->flags |= IO_REGION_F_USER_PROVIDED;
        return 0;
}

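/*
 * Back the region with kernel-allocated memory. Prefer one physically
 * contiguous (compound) allocation, which takes only a single page
 * reference; fall back to a bulk allocation of individual pages. On
 * success the caller's mmap offset is published in the region descriptor.
 */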
static int io_region_allocate_pages(struct io_ring_ctx *ctx,
                                    struct io_mapped_region *mr,
                                    struct io_uring_region_desc *reg,
                                    unsigned long mmap_offset)
{
        gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN;
        size_t size = (size_t) mr->nr_pages << PAGE_SHIFT;
        unsigned long nr_allocated;
        struct page **pages;
        void *p;

        pages = kvmalloc_array(mr->nr_pages, sizeof(*pages), gfp);
        if (!pages)
                return -ENOMEM;

        p = io_mem_alloc_compound(pages, mr->nr_pages, size, gfp);
        if (!IS_ERR(p)) {
                mr->flags |= IO_REGION_F_SINGLE_REF;
                goto done;
        }

        nr_allocated = alloc_pages_bulk_node(gfp, NUMA_NO_NODE,
                                             mr->nr_pages, pages);
        if (nr_allocated != mr->nr_pages) {
                if (nr_allocated)
                        release_pages(pages, nr_allocated);
                kvfree(pages);
                return -ENOMEM;
        }
done:
        reg->mmap_offset = mmap_offset;
        mr->pages = pages;
        return 0;
}

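/*
 * Validate the user-supplied region descriptor and create the region:
 * account the memory against the user, then either pin user memory
 * (IORING_MEM_REGION_TYPE_USER) or allocate kernel pages, and finally map
 * the pages into the kernel. On any failure the partially set up region is
 * freed again.
 */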
int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
                     struct io_uring_region_desc *reg,
                     unsigned long mmap_offset)
{
        int nr_pages, ret;
        u64 end;

        if (WARN_ON_ONCE(mr->pages || mr->ptr || mr->nr_pages))
                return -EFAULT;
        if (memchr_inv(&reg->__resv, 0, sizeof(reg->__resv)))
                return -EINVAL;
        if (reg->flags & ~IORING_MEM_REGION_TYPE_USER)
                return -EINVAL;
        /* user_addr should be set IFF it's a user memory backed region */
        if ((reg->flags & IORING_MEM_REGION_TYPE_USER) != !!reg->user_addr)
                return -EFAULT;
        if (!reg->size || reg->mmap_offset || reg->id)
                return -EINVAL;
        if ((reg->size >> PAGE_SHIFT) > INT_MAX)
                return -E2BIG;
        if ((reg->user_addr | reg->size) & ~PAGE_MASK)
                return -EINVAL;
        if (check_add_overflow(reg->user_addr, reg->size, &end))
                return -EOVERFLOW;

        nr_pages = reg->size >> PAGE_SHIFT;
        if (ctx->user) {
                ret = __io_account_mem(ctx->user, nr_pages);
                if (ret)
                        return ret;
        }
        mr->nr_pages = nr_pages;

        if (reg->flags & IORING_MEM_REGION_TYPE_USER)
                ret = io_region_pin_pages(ctx, mr, reg);
        else
                ret = io_region_allocate_pages(ctx, mr, reg, mmap_offset);
        if (ret)
                goto out_free;

        ret = io_region_init_ptr(mr);
        if (ret)
                goto out_free;
        return 0;
out_free:
        io_free_region(ctx, mr);
        return ret;
}

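/*
 * Same as io_create_region(), but build the region in a temporary copy and
 * only publish it into *mr under ->mmap_lock, so a concurrent mmap cannot
 * observe a half-initialised region.
 */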
int io_create_region_mmap_safe(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
                               struct io_uring_region_desc *reg,
                               unsigned long mmap_offset)
{
        struct io_mapped_region tmp_mr;
        int ret;

        memcpy(&tmp_mr, mr, sizeof(tmp_mr));
        ret = io_create_region(ctx, &tmp_mr, reg, mmap_offset);
        if (ret)
                return ret;

        /*
         * Once published, mmap can find it while holding only ->mmap_lock
         * and not ->uring_lock.
         */
        guard(mutex)(&ctx->mmap_lock);
        memcpy(mr, &tmp_mr, sizeof(tmp_mr));
        return 0;
}

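/*
 * Translate an mmap page offset into the io_mapped_region that backs it.
 * The masked offset bits select which ring/region type is requested; for
 * provided buffer rings and zcrx areas the remaining bits carry the
 * instance id.
 */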
static struct io_mapped_region *io_mmap_get_region(struct io_ring_ctx *ctx,
                                                   loff_t pgoff)
{
        loff_t offset = pgoff << PAGE_SHIFT;
        unsigned int id;

        switch (offset & IORING_OFF_MMAP_MASK) {
        case IORING_OFF_SQ_RING:
        case IORING_OFF_CQ_RING:
                return &ctx->ring_region;
        case IORING_OFF_SQES:
                return &ctx->sq_region;
        case IORING_OFF_PBUF_RING:
                id = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
                return io_pbuf_get_region(ctx, id);
        case IORING_MAP_OFF_PARAM_REGION:
                return &ctx->param_region;
        case IORING_MAP_OFF_ZCRX_REGION:
                id = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_ZCRX_SHIFT;
                return io_zcrx_get_region(ctx, id);
        }
        return NULL;
}

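/*
 * Check that a region may be mmap'ed by userspace: it must be fully set up
 * and must not be backed by user-provided memory (which userspace can
 * already access directly). Returns the region's kernel pointer on success.
 */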
static void *io_region_validate_mmap(struct io_ring_ctx *ctx,
                                     struct io_mapped_region *mr)
{
        lockdep_assert_held(&ctx->mmap_lock);

        if (!io_region_is_set(mr))
                return ERR_PTR(-EINVAL);
        if (mr->flags & IO_REGION_F_USER_PROVIDED)
                return ERR_PTR(-EINVAL);

        return io_region_get_ptr(mr);
}

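/*
 * Resolve an mmap request against the ring's regions and validate it,
 * returning the region's kernel address or an ERR_PTR().
 */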
static void *io_uring_validate_mmap_request(struct file *file, loff_t pgoff,
                                            size_t sz)
{
        struct io_ring_ctx *ctx = file->private_data;
        struct io_mapped_region *region;

        region = io_mmap_get_region(ctx, pgoff);
        if (!region)
                return ERR_PTR(-EINVAL);
        return io_region_validate_mmap(ctx, region);
}

#ifdef CONFIG_MMU

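/* Insert up to @max_pages of the region's pages into the given VMA. */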
static int io_region_mmap(struct io_ring_ctx *ctx,
                          struct io_mapped_region *mr,
                          struct vm_area_struct *vma,
                          unsigned max_pages)
{
        unsigned long nr_pages = min(mr->nr_pages, max_pages);

        vm_flags_set(vma, VM_DONTEXPAND);
        return vm_insert_pages(vma, vma->vm_start, mr->pages, &nr_pages);
}

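/*
 * mmap handler for io_uring files: validate the requested offset and size
 * under ->mmap_lock and insert the backing region's pages into the VMA.
 * For the SQ/CQ ring offsets the mapping is clamped to the length
 * userspace asked for.
 */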
__cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct io_ring_ctx *ctx = file->private_data;
        size_t sz = vma->vm_end - vma->vm_start;
        long offset = vma->vm_pgoff << PAGE_SHIFT;
        unsigned int page_limit = UINT_MAX;
        struct io_mapped_region *region;
        void *ptr;

        guard(mutex)(&ctx->mmap_lock);

        ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);

        switch (offset & IORING_OFF_MMAP_MASK) {
        case IORING_OFF_SQ_RING:
        case IORING_OFF_CQ_RING:
                page_limit = (sz + PAGE_SIZE - 1) >> PAGE_SHIFT;
                break;
        }

        region = io_mmap_get_region(ctx, vma->vm_pgoff);
        return io_region_mmap(ctx, region, vma, page_limit);
}

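/*
 * Pick a userspace address for an io_uring mapping. Address hints are
 * rejected, and on architectures with cache colouring requirements
 * (SHM_COLOUR) the kernel address of the region is used so the user and
 * kernel mappings alias coherently.
 */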
unsigned long io_uring_get_unmapped_area(struct file *filp, unsigned long addr,
                                         unsigned long len, unsigned long pgoff,
                                         unsigned long flags)
{
        struct io_ring_ctx *ctx = filp->private_data;
        void *ptr;

        /*
         * Do not allow mapping to a user-provided address, to avoid
         * breaking the aliasing rules. Userspace is not able to guess the
         * address of a kernel kmalloc()ed memory area anyway.
         */
        if (addr)
                return -EINVAL;

        guard(mutex)(&ctx->mmap_lock);

        ptr = io_uring_validate_mmap_request(filp, pgoff, len);
        if (IS_ERR(ptr))
                return -ENOMEM;

        /*
         * Some architectures have strong cache aliasing requirements.
         * For such architectures we need a coherent mapping which aliases
         * kernel memory *and* userspace memory. To achieve that:
         * - use a NULL file pointer to reference physical memory, and
         * - use the kernel virtual address of the shared io_uring context
         *   (instead of the userspace-provided address, which has to be 0UL
         *   anyway).
         * - use the same pgoff which the get_unmapped_area() uses to
         *   calculate the page colouring.
         * For architectures without such aliasing requirements, the
         * architecture will return any suitable mapping because addr is 0.
         */
        filp = NULL;
        flags |= MAP_SHARED;
        pgoff = 0;      /* has been translated to ptr above */
#ifdef SHM_COLOUR
        addr = (uintptr_t) ptr;
        pgoff = addr >> PAGE_SHIFT;
#else
        addr = 0UL;
#endif
        return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
}

#else /* !CONFIG_MMU */

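/* Without an MMU, only shared mappings of the ring memory are supported. */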
int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
        return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -EINVAL;
}

unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
}

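/*
 * On nommu the "unmapped area" is simply the kernel address of the
 * validated region, which userspace then accesses directly.
 */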
unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
                                         unsigned long len, unsigned long pgoff,
                                         unsigned long flags)
{
        struct io_ring_ctx *ctx = file->private_data;
        void *ptr;

        guard(mutex)(&ctx->mmap_lock);

        ptr = io_uring_validate_mmap_request(file, pgoff, len);
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);

        return (unsigned long) ptr;
}

#endif /* !CONFIG_MMU */