GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/compat/linux/linux_mmap.c
/*-
 * Copyright (c) 2004 Tim J. Robbins
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 2000 Marcel Moolenaar
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>

#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_mmap.h>
#include <compat/linux/linux_persona.h>
#include <compat/linux/linux_util.h>

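/*
 * STACK_SIZE mirrors the implicit per-thread stack limit that the
 * MAP_GROWSDOWN comment in linux_mmap_common() below attributes to Linux
 * threads; GUARD_SIZE reserves four pages of that region, so growable
 * thread stacks end up capped at (STACK_SIZE - GUARD_SIZE).
 */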
#define STACK_SIZE (2 * 1024 * 1024)
#define GUARD_SIZE (4 * PAGE_SIZE)

#if defined(__amd64__)
static void linux_fixup_prot(struct thread *td, int *prot);
#endif

static int
linux_mmap_check_fp(struct file *fp, int flags, int prot, int maxprot)
{

    /* Linux mmap() just fails for O_WRONLY files */
    if ((fp->f_flag & FREAD) == 0)
        return (EACCES);

    return (0);
}
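
/*
 * A rough userland sketch of what the check above emulates: a Linux binary
 * doing roughly
 *
 *     fd = open(path, O_WRONLY);
 *     p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *
 * expects MAP_FAILED with errno set to EACCES because the descriptor is not
 * open for reading, which is the error returned above.
 */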

int
linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
    int flags, int fd, off_t pos)
{
    struct mmap_req mr, mr_fixed;
    struct proc *p = td->td_proc;
    struct vmspace *vms = td->td_proc->p_vmspace;
    int bsd_flags, error;

    LINUX_CTR6(mmap2, "0x%lx, %ld, %ld, 0x%08lx, %ld, 0x%lx",
        addr, len, prot, flags, fd, pos);

    error = 0;
    bsd_flags = 0;

    /*
     * Linux mmap(2):
     * You must specify exactly one of MAP_SHARED and MAP_PRIVATE
     */
    if (!((flags & LINUX_MAP_SHARED) ^ (flags & LINUX_MAP_PRIVATE)))
        return (EINVAL);

    if (flags & LINUX_MAP_SHARED)
        bsd_flags |= MAP_SHARED;
    if (flags & LINUX_MAP_PRIVATE)
        bsd_flags |= MAP_PRIVATE;
    if (flags & LINUX_MAP_FIXED)
        bsd_flags |= MAP_FIXED;
    if (flags & LINUX_MAP_ANON) {
        /* Enforce pos to be on page boundary, then ignore. */
        if ((pos & PAGE_MASK) != 0)
            return (EINVAL);
        pos = 0;
        bsd_flags |= MAP_ANON;
    } else
        bsd_flags |= MAP_NOSYNC;
    if (flags & LINUX_MAP_GROWSDOWN)
        bsd_flags |= MAP_STACK;

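    /*
     * At this point bsd_flags holds the FreeBSD translation of the Linux
     * MAP_* bits: the sharing mode and MAP_FIXED map across directly,
     * anonymous requests become MAP_ANON with pos forced to 0, file-backed
     * requests pick up MAP_NOSYNC, and MAP_GROWSDOWN becomes MAP_STACK.
     */
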
#if defined(__amd64__)
    /*
     * According to the Linux mmap(2) man page, "MAP_32BIT flag
     * is ignored when MAP_FIXED is set."
     */
    if ((flags & LINUX_MAP_32BIT) && (flags & LINUX_MAP_FIXED) == 0)
        bsd_flags |= MAP_32BIT;

    /*
     * PROT_READ, PROT_WRITE, or PROT_EXEC implies PROT_READ and PROT_EXEC
     * on Linux/i386 if the binary requires an executable stack.
     * We do this only for IA32 emulation, as on native i386 this does not
     * make sense without PAE.
     *
     * XXX. Linux checks that the file system is not mounted with noexec.
     */
    linux_fixup_prot(td, &prot);
#endif

    /* Linux does not check the file descriptor when MAP_ANONYMOUS is set. */
    fd = (bsd_flags & MAP_ANON) ? -1 : fd;
    if (flags & LINUX_MAP_GROWSDOWN) {
        /*
         * The Linux MAP_GROWSDOWN option does not limit auto
         * growth of the region.  Linux mmap with this option
         * takes as addr the initial BOS, and as len the initial
         * region size.  It can then grow down from addr without
         * limit.  However, Linux threads have an implicit internal
         * limit to stack size of STACK_SIZE.  It is just not
         * enforced explicitly in Linux.  But here we impose
         * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
         * region, since we can do this with our mmap.
         *
         * Our mmap with MAP_STACK takes addr as the maximum
         * downsize limit on BOS, and as len the max size of
         * the region.  It then maps the top SGROWSIZ bytes,
         * and auto grows the region down, up to the limit
         * in addr.
         *
         * If we don't use the MAP_STACK option, the effect
         * of this code is to allocate a stack region of a
         * fixed size of (STACK_SIZE - GUARD_SIZE).
         */

        if ((caddr_t)addr + len > vms->vm_maxsaddr) {
            /*
             * Some Linux apps will attempt to mmap
             * thread stacks near the top of their
             * address space.  If their TOS is greater
             * than vm_maxsaddr, vm_map_growstack()
             * will confuse the thread stack with the
             * process stack and deliver a SEGV if they
             * attempt to grow the thread stack past their
             * current stacksize rlimit.  To avoid this,
             * adjust vm_maxsaddr upwards to reflect
             * the current stacksize rlimit rather
             * than the maximum possible stacksize.
             * It would be better to adjust the
             * mmap'ed region, but some apps do not check
             * mmap's return value.
             */
            PROC_LOCK(p);
            vms->vm_maxsaddr = (char *)round_page(vms->vm_stacktop) -
                lim_cur_proc(p, RLIMIT_STACK);
            PROC_UNLOCK(p);
        }

        /*
         * This gives us our maximum stack size and a new BOS.
         * If we're using VM_STACK, then mmap will just map
         * the top SGROWSIZ bytes, and let the stack grow down
         * to the limit at BOS.  If we're not using VM_STACK
         * we map the full stack, since we don't have a way
         * to autogrow it.
         */
        if (len <= STACK_SIZE - GUARD_SIZE) {
            addr = addr - (STACK_SIZE - GUARD_SIZE - len);
            len = STACK_SIZE - GUARD_SIZE;
        }
    }
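
    /*
     * For illustration only (the numbers are made up): a Linux threading
     * library asking for a 64 KB growable stack with
     *
     *     mmap(bos, 64 * 1024, PROT_READ | PROT_WRITE,
     *         MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN, -1, 0)
     *
     * reaches this point as a MAP_STACK request whose addr has been moved
     * down and whose len has been widened to (STACK_SIZE - GUARD_SIZE), so
     * the region can auto-grow toward lower addresses up to that limit
     * rather than staying at the 64 KB initially requested.
     */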

    /*
     * FreeBSD is free to ignore the address hint if MAP_FIXED wasn't
     * passed.  However, some Linux applications, like the ART runtime,
     * depend on the hint.  If MAP_FIXED wasn't passed but the
     * address is not zero, try with MAP_FIXED and MAP_EXCL first,
     * and fall back to the normal behaviour if that fails.
     */
    mr = (struct mmap_req) {
        .mr_hint = addr,
        .mr_len = len,
        .mr_prot = prot,
        .mr_flags = bsd_flags,
        .mr_fd = fd,
        .mr_pos = pos,
        .mr_check_fp_fn = linux_mmap_check_fp,
    };
    if (addr != 0 && (bsd_flags & MAP_FIXED) == 0 &&
        (bsd_flags & MAP_EXCL) == 0) {
        mr_fixed = mr;
        mr_fixed.mr_flags |= MAP_FIXED | MAP_EXCL;
        error = kern_mmap(td, &mr_fixed);
        if (error == 0)
            goto out;
    }

    error = kern_mmap(td, &mr);
out:
    LINUX_CTR2(mmap2, "return: %d (%p)", error, td->td_retval[0]);

    return (error);
}
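
/*
 * A minimal sketch of the hint fallback above, with an arbitrary address
 * chosen purely for illustration: a Linux request such as
 *
 *     mmap((void *)0x70000000, len, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
 *
 * is first retried internally with MAP_FIXED | MAP_EXCL so the mapping lands
 * exactly at the hinted address when that range is free; only if that attempt
 * fails does the plain hinted kern_mmap() call run, in which case FreeBSD may
 * place the mapping elsewhere.
 */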

int
linux_mprotect_common(struct thread *td, uintptr_t addr, size_t len, int prot)
{
    int flags = 0;

    /* XXX Ignore PROT_GROWSUP for now. */
    prot &= ~LINUX_PROT_GROWSUP;
    if ((prot & ~(LINUX_PROT_GROWSDOWN | PROT_READ | PROT_WRITE |
        PROT_EXEC)) != 0)
        return (EINVAL);
    if ((prot & LINUX_PROT_GROWSDOWN) != 0) {
        prot &= ~LINUX_PROT_GROWSDOWN;
        flags |= VM_MAP_PROTECT_GROWSDOWN;
    }

#if defined(__amd64__)
    linux_fixup_prot(td, &prot);
#endif
    return (kern_mprotect(td, addr, len, prot, flags));
}
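
/*
 * Sketch of what the PROT_GROWSDOWN handling above is for, following the
 * semantics described in the Linux mprotect(2) man page: a call like
 *
 *     mprotect(stack_page, PAGE_SIZE, PROT_READ | PROT_GROWSDOWN)
 *
 * asks Linux to apply the protection from stack_page down to the bottom of
 * the underlying grows-down mapping.  Here the flag is stripped from prot
 * and carried to kern_mprotect() as VM_MAP_PROTECT_GROWSDOWN instead.
 */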

/*
 * Implement Linux madvise(MADV_DONTNEED), which has unusual semantics: for
 * anonymous memory, pages in the range are immediately discarded.
 */
static int
linux_madvise_dontneed(struct thread *td, vm_offset_t start, vm_offset_t end)
{
    vm_map_t map;
    vm_map_entry_t entry;
    vm_object_t backing_object, object;
    vm_offset_t estart, eend;
    vm_pindex_t pstart, pend;
    int error;

    map = &td->td_proc->p_vmspace->vm_map;

    if (!vm_map_range_valid(map, start, end))
        return (EINVAL);
    start = trunc_page(start);
    end = round_page(end);

    error = 0;
    vm_map_lock_read(map);
    if (!vm_map_lookup_entry(map, start, &entry))
        entry = vm_map_entry_succ(entry);
    for (; entry->start < end; entry = vm_map_entry_succ(entry)) {
        if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
            continue;

        if (entry->wired_count != 0) {
            error = EINVAL;
            break;
        }

        object = entry->object.vm_object;
        if (object == NULL)
            continue;
        if ((object->flags & (OBJ_UNMANAGED | OBJ_FICTITIOUS)) != 0)
            continue;

        pstart = OFF_TO_IDX(entry->offset);
        if (start > entry->start) {
            pstart += atop(start - entry->start);
            estart = start;
        } else {
            estart = entry->start;
        }
        pend = OFF_TO_IDX(entry->offset) +
            atop(entry->end - entry->start);
        if (entry->end > end) {
            pend -= atop(entry->end - end);
            eend = end;
        } else {
            eend = entry->end;
        }

        if ((object->flags & (OBJ_ANON | OBJ_ONEMAPPING)) ==
            (OBJ_ANON | OBJ_ONEMAPPING)) {
            /*
             * Singly-mapped anonymous memory is discarded.  This
             * does not match Linux's semantics when the object
             * belongs to a shadow chain of length > 1, since
             * subsequent faults may retrieve pages from an
             * intermediate anonymous object.  However, handling
             * this case correctly introduces a fair bit of
             * complexity.
             */
            VM_OBJECT_WLOCK(object);
            if ((object->flags & OBJ_ONEMAPPING) != 0) {
                vm_object_collapse(object);
                vm_object_page_remove(object, pstart, pend, 0);
                backing_object = object->backing_object;
                if (backing_object != NULL &&
                    (backing_object->flags & OBJ_ANON) != 0)
                    linux_msg(td,
                        "possibly incorrect MADV_DONTNEED");
                VM_OBJECT_WUNLOCK(object);
                continue;
            }
            VM_OBJECT_WUNLOCK(object);
        }

        /*
         * Handle shared mappings.  Remove them outright instead of
         * calling pmap_advise(), for consistency with Linux.
         */
        pmap_remove(map->pmap, estart, eend);
        vm_object_madvise(object, pstart, pend, MADV_DONTNEED);
    }
    vm_map_unlock_read(map);

    return (error);
}
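
/*
 * For illustration of why pages are removed above rather than merely advised
 * (a sketch of the Linux semantics being emulated): after
 *
 *     p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *     memset(p, 0xff, len);
 *     madvise(p, len, MADV_DONTNEED);
 *
 * a Linux program expects subsequent reads of p to return zero-filled pages
 * immediately, not the old contents at some later time of the kernel's
 * choosing, which is what a plain FreeBSD MADV_DONTNEED would allow.
 */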

int
linux_madvise_common(struct thread *td, uintptr_t addr, size_t len, int behav)
{

    switch (behav) {
    case LINUX_MADV_NORMAL:
        return (kern_madvise(td, addr, len, MADV_NORMAL));
    case LINUX_MADV_RANDOM:
        return (kern_madvise(td, addr, len, MADV_RANDOM));
    case LINUX_MADV_SEQUENTIAL:
        return (kern_madvise(td, addr, len, MADV_SEQUENTIAL));
    case LINUX_MADV_WILLNEED:
        return (kern_madvise(td, addr, len, MADV_WILLNEED));
    case LINUX_MADV_DONTNEED:
        return (linux_madvise_dontneed(td, addr, addr + len));
    case LINUX_MADV_FREE:
        return (kern_madvise(td, addr, len, MADV_FREE));
    case LINUX_MADV_REMOVE:
        linux_msg(curthread, "unsupported madvise MADV_REMOVE");
        return (EINVAL);
    case LINUX_MADV_DONTFORK:
        return (kern_minherit(td, addr, len, INHERIT_NONE));
    case LINUX_MADV_DOFORK:
        return (kern_minherit(td, addr, len, INHERIT_COPY));
    case LINUX_MADV_MERGEABLE:
        linux_msg(curthread, "unsupported madvise MADV_MERGEABLE");
        return (EINVAL);
    case LINUX_MADV_UNMERGEABLE:
        /* We don't merge anyway. */
        return (0);
    case LINUX_MADV_HUGEPAGE:
        /* Ignored; on FreeBSD huge pages are always on. */
        return (0);
    case LINUX_MADV_NOHUGEPAGE:
#if 0
        /*
         * Don't warn - Firefox uses it a lot, and in real Linux it's
         * an optional feature.
         */
        linux_msg(curthread, "unsupported madvise MADV_NOHUGEPAGE");
#endif
        return (EINVAL);
    case LINUX_MADV_DONTDUMP:
        return (kern_madvise(td, addr, len, MADV_NOCORE));
    case LINUX_MADV_DODUMP:
        return (kern_madvise(td, addr, len, MADV_CORE));
    case LINUX_MADV_WIPEONFORK:
        return (kern_minherit(td, addr, len, INHERIT_ZERO));
    case LINUX_MADV_KEEPONFORK:
        return (kern_minherit(td, addr, len, INHERIT_COPY));
    case LINUX_MADV_HWPOISON:
        linux_msg(curthread, "unsupported madvise MADV_HWPOISON");
        return (EINVAL);
    case LINUX_MADV_SOFT_OFFLINE:
        linux_msg(curthread, "unsupported madvise MADV_SOFT_OFFLINE");
        return (EINVAL);
    case -1:
        /*
         * -1 is sometimes used as a dummy value to detect simplistic
         * madvise(2) stub implementations.  This safeguard is used by
         * BoringSSL, for example, before assuming MADV_WIPEONFORK is
         * safe to use.  Don't produce an "unsupported" error message
         * for this special dummy value, which is unlikely to be used
         * by any new advisory behavior feature.
         */
        return (EINVAL);
    default:
        linux_msg(curthread, "unsupported madvise behav %d", behav);
        return (EINVAL);
    }
}

#if defined(__amd64__)
static void
linux_fixup_prot(struct thread *td, int *prot)
{
    struct linux_pemuldata *pem;

    if (SV_PROC_FLAG(td->td_proc, SV_ILP32) && *prot & PROT_READ) {
        pem = pem_find(td->td_proc);
        if (pem->persona & LINUX_READ_IMPLIES_EXEC)
            *prot |= PROT_EXEC;
    }
}
#endif