GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/kern/kern_exec.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1993, David Greenman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_capsicum.h"
#include "opt_hwpmc_hooks.h"
#include "opt_hwt_hooks.h"
#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/acct.h>
#include <sys/asan.h>
#include <sys/capsicum.h>
#include <sys/compressor.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reg.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/timers.h>
#include <sys/ucoredump.h>
#include <sys/umtxvar.h>
#include <sys/vnode.h>
#include <sys/wait.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifdef HWT_HOOKS
#include <dev/hwt/hwt_hook.h>
#endif

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
dtrace_execexit_func_t	dtrace_fasttrap_exec;
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE1(proc, , , exec, "char *");
SDT_PROBE_DEFINE1(proc, , , exec__failure, "int");
SDT_PROBE_DEFINE1(proc, , , exec__success, "char *");

MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");

int coredump_pack_fileinfo = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_fileinfo, CTLFLAG_RWTUN,
    &coredump_pack_fileinfo, 0,
    "Enable file path packing in 'procstat -f' coredump notes");

int coredump_pack_vmmapinfo = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_vmmapinfo, CTLFLAG_RWTUN,
    &coredump_pack_vmmapinfo, 0,
    "Enable file path packing in 'procstat -v' coredump notes");

static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
static int do_execve(struct thread *td, struct image_args *args,
    struct mac *mac_p, struct vmspace *oldvmspace);

/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD|
    CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_ps_strings, "LU",
    "Location of process' ps_strings structure");

/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD|
    CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_usrstack, "LU",
    "Top of process stack");

SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_MPSAFE,
    NULL, 0, sysctl_kern_stackprot, "I",
    "Stack memory permissions");

u_long ps_arg_cache_limit = PAGE_SIZE / 16;
SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
    &ps_arg_cache_limit, 0,
    "Process' command line characters cache limit");

static int disallow_high_osrel;
SYSCTL_INT(_kern, OID_AUTO, disallow_high_osrel, CTLFLAG_RW,
    &disallow_high_osrel, 0,
    "Disallow execution of binaries built for higher version of the world");

static int map_at_zero = 0;
SYSCTL_INT(_security_bsd, OID_AUTO, map_at_zero, CTLFLAG_RWTUN, &map_at_zero, 0,
    "Permit processes to map an object at virtual address 0.");

static int core_dump_can_intr = 1;
SYSCTL_INT(_kern, OID_AUTO, core_dump_can_intr, CTLFLAG_RWTUN,
    &core_dump_can_intr, 0,
    "Core dumping interruptible with SIGKILL");

static int
sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;
	vm_offset_t ps_strings;

	p = curproc;
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		unsigned int val;
		val = (unsigned int)PROC_PS_STRINGS(p);
		return (SYSCTL_OUT(req, &val, sizeof(val)));
	}
#endif
	ps_strings = PROC_PS_STRINGS(p);
	return (SYSCTL_OUT(req, &ps_strings, sizeof(ps_strings)));
}

static int
sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;
	vm_offset_t val;

	p = curproc;
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		unsigned int val32;

		val32 = round_page((unsigned int)p->p_vmspace->vm_stacktop);
		return (SYSCTL_OUT(req, &val32, sizeof(val32)));
	}
#endif
	val = round_page(p->p_vmspace->vm_stacktop);
	return (SYSCTL_OUT(req, &val, sizeof(val)));
}

static int
sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;

	p = curproc;
	return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
	    sizeof(p->p_sysent->sv_stackprot)));
}

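/*
 * Example (an illustrative userland sketch, not part of this file):
 * the read-only handlers above can be queried with sysctlbyname(3):
 *
 *	u_long usrstack;
 *	size_t len = sizeof(usrstack);
 *
 *	if (sysctlbyname("kern.usrstack", &usrstack, &len, NULL, 0) == 0)
 *		printf("stack top at %#lx\n", usrstack);
 *
 * kern.ps_strings and kern.stackprot can be read the same way.
 */
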
/*
 * Each of the items is a pointer to a `const struct execsw', hence the
 * double pointer here.
 */
static const struct execsw **execsw;

#ifndef _SYS_SYSPROTO_H_
struct execve_args {
	char *fname;
	char **argv;
	char **envv;
};
#endif

int
sys_execve(struct thread *td, struct execve_args *uap)
{
	struct image_args args;
	struct vmspace *oldvmspace;
	int error;

	error = pre_execve(td, &oldvmspace);
	if (error != 0)
		return (error);
	error = exec_copyin_args(&args, uap->fname, uap->argv, uap->envv);
	if (error == 0)
		error = kern_execve(td, &args, NULL, oldvmspace);
	post_execve(td, error, oldvmspace);
	AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct fexecve_args {
	int fd;
	char **argv;
	char **envv;
};
#endif
int
sys_fexecve(struct thread *td, struct fexecve_args *uap)
{
	struct image_args args;
	struct vmspace *oldvmspace;
	int error;

	error = pre_execve(td, &oldvmspace);
	if (error != 0)
		return (error);
	error = exec_copyin_args(&args, NULL, uap->argv, uap->envv);
	if (error == 0) {
		args.fd = uap->fd;
		error = kern_execve(td, &args, NULL, oldvmspace);
	}
	post_execve(td, error, oldvmspace);
	AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct __mac_execve_args {
	char *fname;
	char **argv;
	char **envv;
	struct mac *mac_p;
};
#endif

int
sys___mac_execve(struct thread *td, struct __mac_execve_args *uap)
{
#ifdef MAC
	struct image_args args;
	struct vmspace *oldvmspace;
	int error;

	error = pre_execve(td, &oldvmspace);
	if (error != 0)
		return (error);
	error = exec_copyin_args(&args, uap->fname, uap->argv, uap->envv);
	if (error == 0)
		error = kern_execve(td, &args, uap->mac_p, oldvmspace);
	post_execve(td, error, oldvmspace);
	AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
	return (error);
#else
	return (ENOSYS);
#endif
}

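/*
 * Example (an illustrative userland sketch): sys_fexecve() backs
 * fexecve(2), which executes an already-opened file descriptor:
 *
 *	int fd = open("/bin/ls", O_EXEC);
 *	char *argv[] = { "ls", NULL }, *envv[] = { NULL };
 *
 *	if (fd >= 0)
 *		fexecve(fd, argv, envv);
 */
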
int
pre_execve(struct thread *td, struct vmspace **oldvmspace)
{
	struct proc *p;
	int error;

	KASSERT(td == curthread, ("non-current thread %p", td));
	error = 0;
	p = td->td_proc;
	if ((p->p_flag & P_HADTHREADS) != 0) {
		PROC_LOCK(p);
		if (thread_single(p, SINGLE_BOUNDARY) != 0)
			error = ERESTART;
		PROC_UNLOCK(p);
	}
	KASSERT(error != 0 || (td->td_pflags & TDP_EXECVMSPC) == 0,
	    ("nested execve"));
	*oldvmspace = p->p_vmspace;
	return (error);
}

void
post_execve(struct thread *td, int error, struct vmspace *oldvmspace)
{
	struct proc *p;

	KASSERT(td == curthread, ("non-current thread %p", td));
	p = td->td_proc;
	if ((p->p_flag & P_HADTHREADS) != 0) {
		PROC_LOCK(p);
		/*
		 * On success, we upgrade to SINGLE_EXIT state to
		 * force other threads to exit.
		 */
		if (error == EJUSTRETURN)
			thread_single(p, SINGLE_EXIT);
		else
			thread_single_end(p, SINGLE_BOUNDARY);
		PROC_UNLOCK(p);
	}
	exec_cleanup(td, oldvmspace);
}

/*
 * kern_execve() has the astonishing property of not always returning to
 * the caller.  If sufficiently bad things happen during the call to
 * do_execve(), it can end up calling exit1(); as a result, callers must
 * avoid doing anything which they might need to undo (e.g., allocating
 * memory).
 */
int
kern_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
    struct vmspace *oldvmspace)
{

	TSEXEC(td->td_proc->p_pid, args->begin_argv);
	AUDIT_ARG_ARGV(args->begin_argv, args->argc,
	    exec_args_get_begin_envv(args) - args->begin_argv);
	AUDIT_ARG_ENVV(exec_args_get_begin_envv(args), args->envc,
	    args->endp - exec_args_get_begin_envv(args));
#ifdef KTRACE
	if (KTRPOINT(td, KTR_ARGS)) {
		ktrdata(KTR_ARGS, args->begin_argv,
		    exec_args_get_begin_envv(args) - args->begin_argv);
	}
	if (KTRPOINT(td, KTR_ENVS)) {
		ktrdata(KTR_ENVS, exec_args_get_begin_envv(args),
		    args->endp - exec_args_get_begin_envv(args));
	}
#endif
	/* Must have at least one argument. */
	if (args->argc == 0) {
		exec_free_args(args);
		return (EINVAL);
	}
	return (do_execve(td, args, mac_p, oldvmspace));
}

static void
execve_nosetid(struct image_params *imgp)
{
	imgp->credential_setid = false;
	if (imgp->newcred != NULL) {
		crfree(imgp->newcred);
		imgp->newcred = NULL;
	}
}

/*
 * In-kernel implementation of execve().  All arguments are assumed to be
 * userspace pointers from the passed thread.
 */
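/*
 * Roughly, do_execve() proceeds as follows: mark the process P_INEXEC
 * and locate the image vnode (by path, by descriptor, or from an
 * interpreter vnode supplied by an image activator); check permissions,
 * map the first page, and pre-compute any setuid/setgid/MAC credential
 * change; run the image activators, looping back to "interpret:" for
 * scripts; then copy out strings, fix up the stack, unshare the file
 * descriptor table and sigacts, install the new credentials and set the
 * register state.  On success the caller sees EJUSTRETURN; on failure
 * after the old vmspace has been destroyed, the process exits via
 * exit1().
 */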
static int
do_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
    struct vmspace *oldvmspace)
{
	struct proc *p = td->td_proc;
	struct nameidata nd;
	struct ucred *oldcred;
	struct uidinfo *euip = NULL;
	uintptr_t stack_base;
	struct image_params image_params, *imgp;
	struct vattr attr;
	struct pargs *oldargs = NULL, *newargs = NULL;
	struct sigacts *oldsigacts = NULL, *newsigacts = NULL;
#ifdef KTRACE
	struct ktr_io_params *kiop;
#endif
	struct vnode *oldtextvp, *newtextvp;
	struct vnode *oldtextdvp, *newtextdvp;
	char *oldbinname, *newbinname;
	bool credential_changing;
#ifdef MAC
	struct label *interpvplabel = NULL;
	bool will_transition;
#endif
#ifdef HWPMC_HOOKS
	struct pmckern_procexec pe;
#endif
	int error, i, orig_osrel;
	uint32_t orig_fctl0;
	Elf_Brandinfo *orig_brandinfo;
	size_t freepath_size;
	static const char fexecv_proc_title[] = "(fexecv)";

	imgp = &image_params;
	oldtextvp = oldtextdvp = NULL;
	newtextvp = newtextdvp = NULL;
	newbinname = oldbinname = NULL;
#ifdef KTRACE
	kiop = NULL;
#endif

	/*
	 * Lock the process and set the P_INEXEC flag to indicate that
	 * it should be left alone until we're done here.  This is
	 * necessary to avoid race conditions - e.g. in ptrace() -
	 * that might allow a local user to illicitly obtain elevated
	 * privileges.
	 */
	PROC_LOCK(p);
	KASSERT((p->p_flag & P_INEXEC) == 0,
	    ("%s(): process already has P_INEXEC flag", __func__));
	p->p_flag |= P_INEXEC;
	PROC_UNLOCK(p);

	/*
	 * Initialize part of the common data
	 */
	bzero(imgp, sizeof(*imgp));
	imgp->proc = p;
	imgp->attr = &attr;
	imgp->args = args;
	oldcred = p->p_ucred;
	orig_osrel = p->p_osrel;
	orig_fctl0 = p->p_fctl0;
	orig_brandinfo = p->p_elf_brandinfo;

#ifdef MAC
	error = mac_execve_enter(imgp, mac_p);
	if (error)
		goto exec_fail;
#endif

	SDT_PROBE1(proc, , , exec, args->fname);

interpret:
	if (args->fname != NULL) {
#ifdef CAPABILITY_MODE
		if (CAP_TRACING(td))
			ktrcapfail(CAPFAIL_NAMEI, args->fname);
		/*
		 * While capability mode can't reach this point via direct
		 * path arguments to execve(), we also don't allow
		 * interpreters to be used in capability mode (for now).
		 * Catch indirect lookups and return a permissions error.
		 */
		if (IN_CAPABILITY_MODE(td)) {
			error = ECAPMODE;
			goto exec_fail;
		}
#endif

		/*
		 * Translate the file name.  namei() returns a vnode
		 * pointer in ni_vp among other things.
		 */
		NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | LOCKSHARED | FOLLOW |
		    AUDITVNODE1 | WANTPARENT, UIO_SYSSPACE,
		    args->fname);

		error = namei(&nd);
		if (error)
			goto exec_fail;

		newtextvp = nd.ni_vp;
		newtextdvp = nd.ni_dvp;
		nd.ni_dvp = NULL;
		newbinname = malloc(nd.ni_cnd.cn_namelen + 1, M_PARGS,
		    M_WAITOK);
		memcpy(newbinname, nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen);
		newbinname[nd.ni_cnd.cn_namelen] = '\0';
		imgp->vp = newtextvp;

		/*
		 * Do our best to calculate the full path to the image file.
		 */
		if (args->fname[0] == '/') {
			imgp->execpath = args->fname;
		} else {
			VOP_UNLOCK(imgp->vp);
			freepath_size = MAXPATHLEN;
			if (vn_fullpath_hardlink(newtextvp, newtextdvp,
			    newbinname, nd.ni_cnd.cn_namelen, &imgp->execpath,
			    &imgp->freepath, &freepath_size) != 0)
				imgp->execpath = args->fname;
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		}
	} else if (imgp->interpreter_vp) {
		/*
		 * An image activator has already provided an open vnode
		 */
		newtextvp = imgp->interpreter_vp;
		imgp->interpreter_vp = NULL;
		if (vn_fullpath(newtextvp, &imgp->execpath,
		    &imgp->freepath) != 0)
			imgp->execpath = args->fname;
		vn_lock(newtextvp, LK_SHARED | LK_RETRY);
		AUDIT_ARG_VNODE1(newtextvp);
		imgp->vp = newtextvp;
	} else {
		AUDIT_ARG_FD(args->fd);

		/*
		 * If the descriptor was not opened with O_PATH, then
		 * we require that it was opened with O_EXEC or
		 * O_RDONLY.  In either case, exec_check_permissions()
		 * below checks the _current_ file access mode regardless
		 * of the permissions additionally checked at the
		 * open(2).
		 */
		error = fgetvp_exec(td, args->fd, &cap_fexecve_rights,
		    &newtextvp);
		if (error != 0)
			goto exec_fail;

		if (vn_fullpath(newtextvp, &imgp->execpath,
		    &imgp->freepath) != 0)
			imgp->execpath = args->fname;
		vn_lock(newtextvp, LK_SHARED | LK_RETRY);
		AUDIT_ARG_VNODE1(newtextvp);
		imgp->vp = newtextvp;
	}

	/*
	 * Check file permissions.  Also 'opens' file and sets its vnode to
	 * text mode.
	 */
	error = exec_check_permissions(imgp);
	if (error)
		goto exec_fail_dealloc;

	imgp->object = imgp->vp->v_object;
	if (imgp->object != NULL)
		vm_object_reference(imgp->object);

	error = exec_map_first_page(imgp);
	if (error)
		goto exec_fail_dealloc;

	imgp->proc->p_osrel = 0;
	imgp->proc->p_fctl0 = 0;
	imgp->proc->p_elf_brandinfo = NULL;

	/*
	 * Implement image setuid/setgid.
	 *
	 * Determine new credentials before attempting image activators
	 * so that it can be used by process_exec handlers to determine
	 * credential/setid changes.
	 *
	 * Don't honor setuid/setgid if the filesystem prohibits it or if
	 * the process is being traced.
	 *
	 * We disable setuid/setgid/etc in capability mode on the basis
	 * that most setugid applications are not written with that
	 * environment in mind, and will therefore almost certainly operate
	 * incorrectly.  In principle there's no reason that setugid
	 * applications might not be useful in capability mode, so we may want
	 * to reconsider this conservative design choice in the future.
	 *
	 * XXXMAC: For the time being, use NOSUID to also prohibit
	 * transitions on the file system.
	 */
	credential_changing = false;
	credential_changing |= (attr.va_mode & S_ISUID) &&
	    oldcred->cr_uid != attr.va_uid;
	credential_changing |= (attr.va_mode & S_ISGID) &&
	    oldcred->cr_gid != attr.va_gid;
#ifdef MAC
	will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp,
	    interpvplabel, imgp) != 0;
	credential_changing |= will_transition;
#endif

	/* Don't inherit PROC_PDEATHSIG_CTL value if setuid/setgid. */
	if (credential_changing)
		imgp->proc->p_pdeathsig = 0;

	if (credential_changing &&
#ifdef CAPABILITY_MODE
	    ((oldcred->cr_flags & CRED_FLAG_CAPMODE) == 0) &&
#endif
	    (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
	    (p->p_flag & P_TRACED) == 0) {
		imgp->credential_setid = true;
		VOP_UNLOCK(imgp->vp);
		imgp->newcred = crdup(oldcred);
		if (attr.va_mode & S_ISUID) {
			euip = uifind(attr.va_uid);
			change_euid(imgp->newcred, euip);
		}
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		if (attr.va_mode & S_ISGID)
			change_egid(imgp->newcred, attr.va_gid);
		/*
		 * Implement correct POSIX saved-id behavior.
		 *
		 * XXXMAC: Note that the current logic will save the
		 * uid and gid if a MAC domain transition occurs, even
		 * though maybe it shouldn't.
		 */
		change_svuid(imgp->newcred, imgp->newcred->cr_uid);
		change_svgid(imgp->newcred, imgp->newcred->cr_gid);
	} else {
		/*
		 * Implement correct POSIX saved-id behavior.
		 *
		 * XXX: It's not clear that the existing behavior is
		 * POSIX-compliant.  A number of sources indicate that the
		 * saved uid/gid should only be updated if the new ruid is
		 * not equal to the old ruid, or the new euid is not equal
		 * to the old euid and the new euid is not equal to the old
		 * ruid.  The FreeBSD code always updates the saved uid/gid.
		 * Also, this code uses the new (replaced) euid and egid as
		 * the source, which may or may not be the right ones to use.
		 */
		if (oldcred->cr_svuid != oldcred->cr_uid ||
		    oldcred->cr_svgid != oldcred->cr_gid) {
			VOP_UNLOCK(imgp->vp);
			imgp->newcred = crdup(oldcred);
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
			change_svuid(imgp->newcred, imgp->newcred->cr_uid);
			change_svgid(imgp->newcred, imgp->newcred->cr_gid);
		}
	}
	/* The new credentials are installed into the process later. */

	/*
	 * Loop through the list of image activators, calling each one.
	 * An activator returns -1 if there is no match, 0 on success,
	 * and an error otherwise.
	 */
	error = -1;
	for (i = 0; error == -1 && execsw[i]; ++i) {
		if (execsw[i]->ex_imgact == NULL)
			continue;
		error = (*execsw[i]->ex_imgact)(imgp);
	}

	if (error) {
		if (error == -1)
			error = ENOEXEC;
		goto exec_fail_dealloc;
	}

	/*
	 * Special interpreter operation, cleanup and loop up to try to
	 * activate the interpreter.
	 */
	if (imgp->interpreted) {
		exec_unmap_first_page(imgp);
		/*
		 * The text reference needs to be removed for scripts.
		 * There is a short period before we determine that
		 * something is a script where text reference is active.
		 * The vnode lock is held over this entire period
		 * so nothing should illegitimately be blocked.
		 */
		MPASS(imgp->textset);
		VOP_UNSET_TEXT_CHECKED(newtextvp);
		imgp->textset = false;
		/* free name buffer and old vnode */
#ifdef MAC
		mac_execve_interpreter_enter(newtextvp, &interpvplabel);
#endif
		if (imgp->opened) {
			VOP_CLOSE(newtextvp, FREAD, td->td_ucred, td);
			imgp->opened = false;
		}
		vput(newtextvp);
		imgp->vp = newtextvp = NULL;
		if (args->fname != NULL) {
			if (newtextdvp != NULL) {
				vrele(newtextdvp);
				newtextdvp = NULL;
			}
			NDFREE_PNBUF(&nd);
			free(newbinname, M_PARGS);
			newbinname = NULL;
		}
		vm_object_deallocate(imgp->object);
		imgp->object = NULL;
		execve_nosetid(imgp);
		imgp->execpath = NULL;
		free(imgp->freepath, M_TEMP);
		imgp->freepath = NULL;
		/* set new name to that of the interpreter */
		if (imgp->interpreter_vp) {
			args->fname = NULL;
		} else {
			args->fname = imgp->interpreter_name;
		}
		goto interpret;
	}

	/*
	 * NB: We unlock the vnode here because it is believed that none
	 * of the sv_copyout_strings/sv_fixup operations require the vnode.
	 */
	VOP_UNLOCK(imgp->vp);

	if (disallow_high_osrel &&
	    P_OSREL_MAJOR(p->p_osrel) > P_OSREL_MAJOR(__FreeBSD_version)) {
		error = ENOEXEC;
		uprintf("Osrel %d for image %s too high\n", p->p_osrel,
		    imgp->execpath != NULL ? imgp->execpath : "<unresolved>");
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		goto exec_fail_dealloc;
	}

	/*
	 * Copy out strings (args and env) and initialize stack base.
	 */
	error = (*p->p_sysent->sv_copyout_strings)(imgp, &stack_base);
	if (error != 0) {
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		goto exec_fail_dealloc;
	}

	/*
	 * Stack setup.
	 */
	error = (*p->p_sysent->sv_fixup)(&stack_base, imgp);
	if (error != 0) {
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		goto exec_fail_dealloc;
	}

	/*
	 * For security and other reasons, the file descriptor table cannot be
	 * shared after an exec.
	 */
	fdunshare(td);
	pdunshare(td);
	/* close files on exec */
	fdcloseexec(td);

	/*
	 * Malloc things before we need locks.
	 */
	i = exec_args_get_begin_envv(imgp->args) - imgp->args->begin_argv;
	/* Cache arguments if they fit inside our allowance */
	if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
		newargs = pargs_alloc(i);
		bcopy(imgp->args->begin_argv, newargs->ar_args, i);
	}

	/*
	 * For security and other reasons, signal handlers cannot
	 * be shared after an exec.  The new process gets a copy of the old
	 * handlers.  In execsigs(), the new process will have its signals
	 * reset.
	 */
	if (sigacts_shared(p->p_sigacts)) {
		oldsigacts = p->p_sigacts;
		newsigacts = sigacts_alloc();
		sigacts_copy(newsigacts, oldsigacts);
	}

	vn_lock(imgp->vp, LK_SHARED | LK_RETRY);

	PROC_LOCK(p);
	if (oldsigacts)
		p->p_sigacts = newsigacts;
	/* Stop profiling */
	stopprofclock(p);

	/* reset caught signals */
	execsigs(p);

	/* name this process - nameiexec(p, ndp) */
	bzero(p->p_comm, sizeof(p->p_comm));
	if (args->fname)
		bcopy(nd.ni_cnd.cn_nameptr, p->p_comm,
		    min(nd.ni_cnd.cn_namelen, MAXCOMLEN));
	else if (vn_commname(newtextvp, p->p_comm, sizeof(p->p_comm)) != 0)
		bcopy(fexecv_proc_title, p->p_comm, sizeof(fexecv_proc_title));
	bcopy(p->p_comm, td->td_name, sizeof(td->td_name));
#ifdef KTR
	sched_clear_tdname(td);
#endif

	/*
	 * mark as execed, wakeup the process that vforked (if any) and tell
	 * it that it now has its own resources back
	 */
	p->p_flag |= P_EXEC;
	td->td_pflags2 &= ~TDP2_UEXTERR;
	if ((p->p_flag2 & P2_NOTRACE_EXEC) == 0)
		p->p_flag2 &= ~P2_NOTRACE;
	if ((p->p_flag2 & P2_STKGAP_DISABLE_EXEC) == 0)
		p->p_flag2 &= ~P2_STKGAP_DISABLE;
	p->p_flag2 &= ~(P2_MEMBAR_PRIVE | P2_MEMBAR_PRIVE_SYNCORE |
	    P2_MEMBAR_GLOBE);
	if (p->p_flag & P_PPWAIT) {
		p->p_flag &= ~(P_PPWAIT | P_PPTRACE);
		cv_broadcast(&p->p_pwait);
		/* STOPs are no longer ignored, arrange for AST */
		signotify(td);
	}

	if ((imgp->sysent->sv_setid_allowed != NULL &&
	    !(*imgp->sysent->sv_setid_allowed)(td, imgp)) ||
	    (p->p_flag2 & P2_NO_NEW_PRIVS) != 0)
		execve_nosetid(imgp);

	/*
	 * Implement image setuid/setgid installation.
	 */
	if (imgp->credential_setid) {
		/*
		 * Turn off syscall tracing for set-id programs, except for
		 * root.  Record any set-id flags first to make sure that
		 * we do not regain any tracing during a possible block.
		 */
		setsugid(p);
#ifdef KTRACE
		kiop = ktrprocexec(p);
#endif
		/*
		 * Close any file descriptors 0..2 that reference procfs,
		 * then make sure file descriptors 0..2 are in use.
		 *
		 * Both fdsetugidsafety() and fdcheckstd() may call functions
		 * taking sleepable locks, so temporarily drop our locks.
		 */
		PROC_UNLOCK(p);
		VOP_UNLOCK(imgp->vp);
		fdsetugidsafety(td);
		error = fdcheckstd(td);
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		if (error != 0)
			goto exec_fail_dealloc;
		PROC_LOCK(p);
#ifdef MAC
		if (will_transition) {
			mac_vnode_execve_transition(oldcred, imgp->newcred,
			    imgp->vp, interpvplabel, imgp);
		}
#endif
	} else {
		if (oldcred->cr_uid == oldcred->cr_ruid &&
		    oldcred->cr_gid == oldcred->cr_rgid)
			p->p_flag &= ~P_SUGID;
	}
	/*
	 * Set the new credentials.
	 */
	if (imgp->newcred != NULL) {
		proc_set_cred(p, imgp->newcred);
		crfree(oldcred);
		oldcred = NULL;
	}

	/*
	 * Store the vp for use in kern.proc.pathname.  This vnode was
	 * referenced by namei() or by fexecve variant of fname handling.
	 */
	oldtextvp = p->p_textvp;
	p->p_textvp = newtextvp;
	oldtextdvp = p->p_textdvp;
	p->p_textdvp = newtextdvp;
	newtextdvp = NULL;
	oldbinname = p->p_binname;
	p->p_binname = newbinname;
	newbinname = NULL;

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the exec if it
	 * has declared an interest.
	 */
	if (dtrace_fasttrap_exec)
		dtrace_fasttrap_exec(p);
#endif

	/*
	 * Notify others that we exec'd, and clear the P_INEXEC flag
	 * as we're now a bona fide freshly-execed process.
	 */
	KNOTE_LOCKED(p->p_klist, NOTE_EXEC);
	p->p_flag &= ~P_INEXEC;

	/* clear "fork but no exec" flag, as we _are_ execing */
	p->p_acflag &= ~AFORK;

	/*
	 * Free any previous argument cache and replace it with
	 * the new argument cache, if any.
	 */
	oldargs = p->p_args;
	p->p_args = newargs;
	newargs = NULL;

	PROC_UNLOCK(p);

#ifdef HWPMC_HOOKS
	/*
	 * Check if system-wide sampling is in effect or if the
	 * current process is using PMCs.  If so, do exec() time
	 * processing.  This processing needs to happen AFTER the
	 * P_INEXEC flag is cleared.
	 */
	if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) {
		VOP_UNLOCK(imgp->vp);
		pe.pm_credentialschanged = credential_changing;
		pe.pm_baseaddr = imgp->reloc_base;
		pe.pm_dynaddr = imgp->et_dyn_addr;

		PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe);
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
	}
#endif

#ifdef HWT_HOOKS
	if ((td->td_proc->p_flag2 & P2_HWT) != 0) {
		struct hwt_record_entry ent;

		VOP_UNLOCK(imgp->vp);
		ent.fullpath = imgp->execpath;
		ent.addr = imgp->et_dyn_addr;
		ent.baseaddr = imgp->reloc_base;
		ent.record_type = HWT_RECORD_EXECUTABLE;
		HWT_CALL_HOOK(td, HWT_EXEC, &ent);
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
	}
#endif

	/* Set values passed into the program in registers. */
	(*p->p_sysent->sv_setregs)(td, imgp, stack_base);

	VOP_MMAPPED(imgp->vp);

	SDT_PROBE1(proc, , , exec__success, args->fname);

exec_fail_dealloc:
	if (error != 0) {
		p->p_osrel = orig_osrel;
		p->p_fctl0 = orig_fctl0;
		p->p_elf_brandinfo = orig_brandinfo;
	}

	if (imgp->firstpage != NULL)
		exec_unmap_first_page(imgp);

	if (imgp->vp != NULL) {
		if (imgp->opened)
			VOP_CLOSE(imgp->vp, FREAD, td->td_ucred, td);
		if (imgp->textset)
			VOP_UNSET_TEXT_CHECKED(imgp->vp);
		if (error != 0)
			vput(imgp->vp);
		else
			VOP_UNLOCK(imgp->vp);
		if (args->fname != NULL)
			NDFREE_PNBUF(&nd);
		if (newtextdvp != NULL)
			vrele(newtextdvp);
		free(newbinname, M_PARGS);
	}

	if (imgp->object != NULL)
		vm_object_deallocate(imgp->object);

	free(imgp->freepath, M_TEMP);

	if (error == 0) {
		if (p->p_ptevents & PTRACE_EXEC) {
			PROC_LOCK(p);
			if (p->p_ptevents & PTRACE_EXEC)
				td->td_dbgflags |= TDB_EXEC;
			PROC_UNLOCK(p);
		}
	} else {
exec_fail:
		/* we're done here, clear P_INEXEC */
		PROC_LOCK(p);
		p->p_flag &= ~P_INEXEC;
		PROC_UNLOCK(p);

		SDT_PROBE1(proc, , , exec__failure, error);
	}

	if (imgp->newcred != NULL && oldcred != NULL)
		crfree(imgp->newcred);

#ifdef MAC
	mac_execve_exit(imgp);
	mac_execve_interpreter_exit(interpvplabel);
#endif
	exec_free_args(args);

	/*
	 * Handle deferred decrement of ref counts.
	 */
	if (oldtextvp != NULL)
		vrele(oldtextvp);
	if (oldtextdvp != NULL)
		vrele(oldtextdvp);
	free(oldbinname, M_PARGS);
#ifdef KTRACE
	ktr_io_params_free(kiop);
#endif
	pargs_drop(oldargs);
	pargs_drop(newargs);
	if (oldsigacts != NULL)
		sigacts_free(oldsigacts);
	if (euip != NULL)
		uifree(euip);

	if (error && imgp->vmspace_destroyed) {
		/* sorry, no more process anymore. exit gracefully */
		exec_cleanup(td, oldvmspace);
		exit1(td, 0, SIGABRT);
		/* NOT REACHED */
	}

#ifdef KTRACE
	if (error == 0)
		ktrprocctor(p);
#endif

	/*
	 * We don't want cpu_set_syscall_retval() to overwrite any of
	 * the register values put in place by exec_setregs().
	 * Implementations of cpu_set_syscall_retval() will leave
	 * registers unmodified when returning EJUSTRETURN.
	 */
	return (error == 0 ? EJUSTRETURN : error);
}

void
exec_cleanup(struct thread *td, struct vmspace *oldvmspace)
{
	if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
		KASSERT(td->td_proc->p_vmspace != oldvmspace,
		    ("oldvmspace still used"));
		vmspace_free(oldvmspace);
		td->td_pflags &= ~TDP_EXECVMSPC;
	}
}

int
exec_map_first_page(struct image_params *imgp)
{
	vm_object_t object;
	vm_page_t m;
	int error;

	if (imgp->firstpage != NULL)
		exec_unmap_first_page(imgp);

	object = imgp->vp->v_object;
	if (object == NULL)
		return (EACCES);
#if VM_NRESERVLEVEL > 0
	if ((object->flags & OBJ_COLORED) == 0) {
		VM_OBJECT_WLOCK(object);
		vm_object_color(object, 0);
		VM_OBJECT_WUNLOCK(object);
	}
#endif
	error = vm_page_grab_valid_unlocked(&m, object, 0,
	    VM_ALLOC_COUNT(VM_INITIAL_PAGEIN) |
	    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);

	if (error != VM_PAGER_OK)
		return (EIO);
	imgp->firstpage = sf_buf_alloc(m, 0);
	imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);

	return (0);
}

void
exec_unmap_first_page(struct image_params *imgp)
{
	vm_page_t m;

	if (imgp->firstpage != NULL) {
		m = sf_buf_page(imgp->firstpage);
		sf_buf_free(imgp->firstpage);
		imgp->firstpage = NULL;
		vm_page_unwire(m, PQ_ACTIVE);
	}
}

void
exec_onexec_old(struct thread *td)
{
	sigfastblock_clear(td);
	umtx_exec(td->td_proc);
}

/*
 * This is an optimization which removes the unmanaged shared page
 * mapping.  In combination with pmap_remove_pages(), which cleans all
 * managed mappings in the process' vmspace pmap, no work will be left
 * for pmap_remove(min, max).
 */
void
exec_free_abi_mappings(struct proc *p)
{
	struct vmspace *vmspace;

	vmspace = p->p_vmspace;
	if (refcount_load(&vmspace->vm_refcnt) != 1)
		return;

	if (!PROC_HAS_SHP(p))
		return;

	pmap_remove(vmspace_pmap(vmspace), vmspace->vm_shp_base,
	    vmspace->vm_shp_base + p->p_sysent->sv_shared_page_len);
}

/*
 * Run down the current address space and install a new one.
 */
int
exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv)
{
	int error;
	struct proc *p = imgp->proc;
	struct vmspace *vmspace = p->p_vmspace;
	struct thread *td = curthread;
	vm_offset_t sv_minuser;
	vm_map_t map;

	imgp->vmspace_destroyed = true;
	imgp->sysent = sv;

	if (p->p_sysent->sv_onexec_old != NULL)
		p->p_sysent->sv_onexec_old(td);
	itimers_exec(p);

	EVENTHANDLER_DIRECT_INVOKE(process_exec, p, imgp);

	/*
	 * Blow away entire process VM, if address space not shared,
	 * otherwise, create a new VM space so that other threads are
	 * not disrupted
	 */
	map = &vmspace->vm_map;
	if (map_at_zero)
		sv_minuser = sv->sv_minuser;
	else
		sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE);
	if (refcount_load(&vmspace->vm_refcnt) == 1 &&
	    vm_map_min(map) == sv_minuser &&
	    vm_map_max(map) == sv->sv_maxuser &&
	    cpu_exec_vmspace_reuse(p, map)) {
		exec_free_abi_mappings(p);
		shmexit(vmspace);
		pmap_remove_pages(vmspace_pmap(vmspace));
		vm_map_remove(map, vm_map_min(map), vm_map_max(map));
		/*
		 * An exec terminates mlockall(MCL_FUTURE).
		 * ASLR and W^X states must be re-evaluated.
		 */
		vm_map_lock(map);
		vm_map_modflags(map, 0, MAP_WIREFUTURE | MAP_ASLR |
		    MAP_ASLR_IGNSTART | MAP_ASLR_STACK | MAP_WXORX);
		vm_map_unlock(map);
	} else {
		error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
		if (error)
			return (error);
		vmspace = p->p_vmspace;
		map = &vmspace->vm_map;
	}
	map->flags |= imgp->map_flags;

	return (sv->sv_onexec != NULL ? sv->sv_onexec(p, imgp) : 0);
}

/*
 * Compute the stack size limit and map the main process stack.
 * Map the shared page.
 */
int
exec_map_stack(struct image_params *imgp)
{
	struct rlimit rlim_stack;
	struct sysentvec *sv;
	struct proc *p;
	vm_map_t map;
	struct vmspace *vmspace;
	vm_offset_t stack_addr, stack_top;
	vm_offset_t sharedpage_addr;
	u_long ssiz;
	int error, find_space, stack_off;
	vm_prot_t stack_prot;
	vm_object_t obj;

	p = imgp->proc;
	sv = p->p_sysent;

	if (imgp->stack_sz != 0) {
		ssiz = trunc_page(imgp->stack_sz);
		PROC_LOCK(p);
		lim_rlimit_proc(p, RLIMIT_STACK, &rlim_stack);
		PROC_UNLOCK(p);
		if (ssiz > rlim_stack.rlim_max)
			ssiz = rlim_stack.rlim_max;
		if (ssiz > rlim_stack.rlim_cur) {
			rlim_stack.rlim_cur = ssiz;
			kern_setrlimit(curthread, RLIMIT_STACK, &rlim_stack);
		}
	} else if (sv->sv_maxssiz != NULL) {
		ssiz = *sv->sv_maxssiz;
	} else {
		ssiz = maxssiz;
	}

	vmspace = p->p_vmspace;
	map = &vmspace->vm_map;

	stack_prot = sv->sv_shared_page_obj != NULL && imgp->stack_prot != 0 ?
	    imgp->stack_prot : sv->sv_stackprot;
	if ((map->flags & MAP_ASLR_STACK) != 0) {
		stack_addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
		    lim_max(curthread, RLIMIT_DATA));
		find_space = VMFS_ANY_SPACE;
	} else {
		stack_addr = sv->sv_usrstack - ssiz;
		find_space = VMFS_NO_SPACE;
	}
	error = vm_map_find(map, NULL, 0, &stack_addr, (vm_size_t)ssiz,
	    sv->sv_usrstack, find_space, stack_prot, VM_PROT_ALL,
	    MAP_STACK_AREA);
	if (error != KERN_SUCCESS) {
uprintf("exec_new_vmspace: mapping stack size %#jx prot %#x "
1264
"failed, mach error %d errno %d\n", (uintmax_t)ssiz,
1265
stack_prot, error, vm_mmap_to_errno(error));
1266
return (vm_mmap_to_errno(error));
1267
	}

	stack_top = stack_addr + ssiz;
	if ((map->flags & MAP_ASLR_STACK) != 0) {
		/* Randomize within the first page of the stack. */
		arc4rand(&stack_off, sizeof(stack_off), 0);
		stack_top -= rounddown2(stack_off & PAGE_MASK, sizeof(void *));
	}

	/* Map a shared page */
	obj = sv->sv_shared_page_obj;
	if (obj == NULL) {
		sharedpage_addr = 0;
		goto out;
	}

	/*
	 * If randomization is disabled then the shared page will
	 * be mapped at address specified in sysentvec.
	 * Otherwise any address above .data section can be selected.
	 * Same logic is used for stack address randomization.
	 * If the address randomization is applied map a guard page
	 * at the top of UVA.
	 */
	vm_object_reference(obj);
	if ((imgp->imgp_flags & IMGP_ASLR_SHARED_PAGE) != 0) {
		sharedpage_addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
		    lim_max(curthread, RLIMIT_DATA));

		error = vm_map_fixed(map, NULL, 0,
		    sv->sv_maxuser - PAGE_SIZE, PAGE_SIZE,
		    VM_PROT_NONE, VM_PROT_NONE, MAP_CREATE_GUARD);
		if (error != KERN_SUCCESS) {
			/*
			 * This is not fatal, so let's just print a warning
			 * and continue.
			 */
uprintf("%s: Mapping guard page at the top of UVA failed"
1305
" mach error %d errno %d",
1306
__func__, error, vm_mmap_to_errno(error));
1307
		}

		error = vm_map_find(map, obj, 0,
		    &sharedpage_addr, sv->sv_shared_page_len,
		    sv->sv_maxuser, VMFS_ANY_SPACE,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
	} else {
		sharedpage_addr = sv->sv_shared_page_base;
		vm_map_fixed(map, obj, 0,
		    sharedpage_addr, sv->sv_shared_page_len,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
	}
	if (error != KERN_SUCCESS) {
		uprintf("%s: mapping shared page at addr: %p "
		    "failed, mach error %d errno %d\n", __func__,
		    (void *)sharedpage_addr, error, vm_mmap_to_errno(error));
		vm_object_deallocate(obj);
		return (vm_mmap_to_errno(error));
	}
out:
	/*
	 * vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they
	 * are still used to enforce the stack rlimit on the process stack.
	 */
	vmspace->vm_maxsaddr = (char *)stack_addr;
	vmspace->vm_stacktop = stack_top;
	vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
	vmspace->vm_shp_base = sharedpage_addr;

	return (0);
}

/*
 * Copy out argument and environment strings from the old process address
 * space into the temporary string buffer.
 */
int
exec_copyin_args(struct image_args *args, const char *fname,
    char **argv, char **envv)
{
	u_long arg, env;
	int error;

	bzero(args, sizeof(*args));
	if (argv == NULL)
		return (EFAULT);

	/*
	 * Allocate demand-paged memory for the file name, argument, and
	 * environment strings.
	 */
	error = exec_alloc_args(args);
	if (error != 0)
		return (error);

	/*
	 * Copy the file name.
	 */
	error = exec_args_add_fname(args, fname, UIO_USERSPACE);
	if (error != 0)
		goto err_exit;

	/*
	 * extract arguments first
	 */
	for (;;) {
		error = fueword(argv++, &arg);
		if (error == -1) {
			error = EFAULT;
			goto err_exit;
		}
		if (arg == 0)
			break;
		error = exec_args_add_arg(args, (char *)(uintptr_t)arg,
		    UIO_USERSPACE);
		if (error != 0)
			goto err_exit;
	}

	/*
	 * extract environment strings
	 */
	if (envv) {
		for (;;) {
			error = fueword(envv++, &env);
			if (error == -1) {
				error = EFAULT;
				goto err_exit;
			}
			if (env == 0)
				break;
			error = exec_args_add_env(args,
			    (char *)(uintptr_t)env, UIO_USERSPACE);
			if (error != 0)
				goto err_exit;
		}
	}

	return (0);

err_exit:
	exec_free_args(args);
	return (error);
}

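/*
 * After exec_copyin_args() the strings live in the flat args->buf,
 * roughly laid out as follows (a sketch; the pointers are maintained
 * by the exec_args_*() helpers below):
 *
 *	+---------+--------------------+--------------------+
 *	| fname\0 | arg0\0 ... argN\0  | env0\0 ... envM\0  |
 *	+---------+--------------------+--------------------+
 *	^buf       ^begin_argv          ^begin_envv          ^endp
 */
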
struct exec_args_kva {
	vm_offset_t addr;
	u_int gen;
	SLIST_ENTRY(exec_args_kva) next;
};

DPCPU_DEFINE_STATIC(struct exec_args_kva *, exec_args_kva);

static SLIST_HEAD(, exec_args_kva) exec_args_kva_freelist;
static struct mtx exec_args_kva_mtx;
static u_int exec_args_gen;

static void
exec_prealloc_args_kva(void *arg __unused)
{
	struct exec_args_kva *argkva;
	u_int i;

	SLIST_INIT(&exec_args_kva_freelist);
	mtx_init(&exec_args_kva_mtx, "exec args kva", NULL, MTX_DEF);
	for (i = 0; i < exec_map_entries; i++) {
		argkva = malloc(sizeof(*argkva), M_PARGS, M_WAITOK);
		argkva->addr = kmap_alloc_wait(exec_map, exec_map_entry_size);
		argkva->gen = exec_args_gen;
		SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
	}
}
SYSINIT(exec_args_kva, SI_SUB_EXEC, SI_ORDER_ANY, exec_prealloc_args_kva, NULL);

static vm_offset_t
exec_alloc_args_kva(void **cookie)
{
	struct exec_args_kva *argkva;

	argkva = (void *)atomic_readandclear_ptr(
	    (uintptr_t *)DPCPU_PTR(exec_args_kva));
	if (argkva == NULL) {
		mtx_lock(&exec_args_kva_mtx);
		while ((argkva = SLIST_FIRST(&exec_args_kva_freelist)) == NULL)
			(void)mtx_sleep(&exec_args_kva_freelist,
			    &exec_args_kva_mtx, 0, "execkva", 0);
		SLIST_REMOVE_HEAD(&exec_args_kva_freelist, next);
		mtx_unlock(&exec_args_kva_mtx);
	}
	kasan_mark((void *)argkva->addr, exec_map_entry_size,
	    exec_map_entry_size, 0);
	*(struct exec_args_kva **)cookie = argkva;
	return (argkva->addr);
}

static void
exec_release_args_kva(struct exec_args_kva *argkva, u_int gen)
{
	vm_offset_t base;

	base = argkva->addr;
	kasan_mark((void *)argkva->addr, 0, exec_map_entry_size,
	    KASAN_EXEC_ARGS_FREED);
	if (argkva->gen != gen) {
		(void)vm_map_madvise(exec_map, base, base + exec_map_entry_size,
		    MADV_FREE);
		argkva->gen = gen;
	}
	if (!atomic_cmpset_ptr((uintptr_t *)DPCPU_PTR(exec_args_kva),
	    (uintptr_t)NULL, (uintptr_t)argkva)) {
		mtx_lock(&exec_args_kva_mtx);
		SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
		wakeup_one(&exec_args_kva_freelist);
		mtx_unlock(&exec_args_kva_mtx);
	}
}

static void
exec_free_args_kva(void *cookie)
{

	exec_release_args_kva(cookie, exec_args_gen);
}

static void
exec_args_kva_lowmem(void *arg __unused, int flags __unused)
{
	SLIST_HEAD(, exec_args_kva) head;
	struct exec_args_kva *argkva;
	u_int gen;
	int i;

	gen = atomic_fetchadd_int(&exec_args_gen, 1) + 1;

	/*
	 * Force an madvise of each KVA range.  Any currently allocated ranges
	 * will have MADV_FREE applied once they are freed.
	 */
	SLIST_INIT(&head);
	mtx_lock(&exec_args_kva_mtx);
	SLIST_SWAP(&head, &exec_args_kva_freelist, exec_args_kva);
	mtx_unlock(&exec_args_kva_mtx);
	while ((argkva = SLIST_FIRST(&head)) != NULL) {
		SLIST_REMOVE_HEAD(&head, next);
		exec_release_args_kva(argkva, gen);
	}

	CPU_FOREACH(i) {
		argkva = (void *)atomic_readandclear_ptr(
		    (uintptr_t *)DPCPU_ID_PTR(i, exec_args_kva));
		if (argkva != NULL)
			exec_release_args_kva(argkva, gen);
	}
}
EVENTHANDLER_DEFINE(vm_lowmem, exec_args_kva_lowmem, NULL,
    EVENTHANDLER_PRI_ANY);

/*
 * Allocate temporary demand-paged, zero-filled memory for the file name,
 * argument, and environment strings.
 */
int
exec_alloc_args(struct image_args *args)
{

	args->buf = (char *)exec_alloc_args_kva(&args->bufkva);
	return (0);
}

void
exec_free_args(struct image_args *args)
{

	if (args->buf != NULL) {
		exec_free_args_kva(args->bufkva);
		args->buf = NULL;
	}
	if (args->fname_buf != NULL) {
		free(args->fname_buf, M_TEMP);
		args->fname_buf = NULL;
	}
}

/*
 * A set of functions to fill struct image_args.
 *
 * NOTE: exec_args_add_fname() must be called (possibly with a NULL
 * fname) before the other functions.  All exec_args_add_arg() calls must
 * be made before any exec_args_add_env() calls.  exec_args_adjust_args()
 * may be called any time after exec_args_add_fname().
 *
 * exec_args_add_fname() - install path to be executed
 * exec_args_add_arg() - append an argument string
 * exec_args_add_env() - append an env string
 * exec_args_adjust_args() - adjust location of the argument list to
 *			     allow new arguments to be prepended
 */
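/*
 * Illustrative call sequence (a sketch only; exec_copyin_args() above
 * is the canonical in-tree user of these helpers):
 *
 *	exec_alloc_args(&args);
 *	exec_args_add_fname(&args, fname, UIO_USERSPACE);
 *	exec_args_add_arg(&args, argp, UIO_USERSPACE);	(repeat per arg)
 *	exec_args_add_env(&args, envp, UIO_USERSPACE);	(repeat per env)
 */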
int
exec_args_add_fname(struct image_args *args, const char *fname,
    enum uio_seg segflg)
{
	int error;
	size_t length;

	KASSERT(args->fname == NULL, ("fname already appended"));
	KASSERT(args->endp == NULL, ("already appending to args"));

	if (fname != NULL) {
		args->fname = args->buf;
		error = segflg == UIO_SYSSPACE ?
		    copystr(fname, args->fname, PATH_MAX, &length) :
		    copyinstr(fname, args->fname, PATH_MAX, &length);
		if (error != 0)
			return (error == ENAMETOOLONG ? E2BIG : error);
	} else
		length = 0;

	/* Set up for _arg_*()/_env_*() */
	args->endp = args->buf + length;
	/* begin_argv must be set and kept updated */
	args->begin_argv = args->endp;
	KASSERT(exec_map_entry_size - length >= ARG_MAX,
	    ("too little space remaining for arguments %zu < %zu",
	    exec_map_entry_size - length, (size_t)ARG_MAX));
	args->stringspace = ARG_MAX;

	return (0);
}

static int
exec_args_add_str(struct image_args *args, const char *str,
    enum uio_seg segflg, int *countp)
{
	int error;
	size_t length;

	KASSERT(args->endp != NULL, ("endp not initialized"));
	KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));

	error = (segflg == UIO_SYSSPACE) ?
	    copystr(str, args->endp, args->stringspace, &length) :
	    copyinstr(str, args->endp, args->stringspace, &length);
	if (error != 0)
		return (error == ENAMETOOLONG ? E2BIG : error);
	args->stringspace -= length;
	args->endp += length;
	(*countp)++;

	return (0);
}

int
exec_args_add_arg(struct image_args *args, const char *argp,
    enum uio_seg segflg)
{

	KASSERT(args->envc == 0, ("appending args after env"));

	return (exec_args_add_str(args, argp, segflg, &args->argc));
}

int
exec_args_add_env(struct image_args *args, const char *envp,
    enum uio_seg segflg)
{

	if (args->envc == 0)
		args->begin_envv = args->endp;

	return (exec_args_add_str(args, envp, segflg, &args->envc));
}

int
exec_args_adjust_args(struct image_args *args, size_t consume, ssize_t extend)
{
	ssize_t offset;

	KASSERT(args->endp != NULL, ("endp not initialized"));
	KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));

	offset = extend - consume;
	if (args->stringspace < offset)
		return (E2BIG);
	memmove(args->begin_argv + extend, args->begin_argv + consume,
	    args->endp - args->begin_argv + consume);
	if (args->envc > 0)
		args->begin_envv += offset;
	args->endp += offset;
	args->stringspace -= offset;
	return (0);
}

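/*
 * Example: an interpreter image activator that must prepend strings
 * (e.g. the interpreter path and its option for a "#!" script) can open
 * a gap at the front of the argument area with exec_args_adjust_args()
 * and write into it.  A sketch, assuming "length" bytes of new
 * NUL-terminated strings in "newargs":
 *
 *	error = exec_args_adjust_args(args, 0, length);
 *	if (error == 0)
 *		memcpy(args->begin_argv, newargs, length);
 */
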
char *
exec_args_get_begin_envv(struct image_args *args)
{

	KASSERT(args->endp != NULL, ("endp not initialized"));

	if (args->envc > 0)
		return (args->begin_envv);
	return (args->endp);
}

/*
 * Copy strings out to the new process address space, constructing new arg
 * and env vector tables.  Return a pointer to the base so that it can be used
 * as the initial stack pointer.
 */
int
exec_copyout_strings(struct image_params *imgp, uintptr_t *stack_base)
{
	int argc, envc;
	char **vectp;
	char *stringp;
	uintptr_t destp, ustringp;
	struct ps_strings *arginfo;
	struct proc *p;
	struct sysentvec *sysent;
	size_t execpath_len;
	int error, szsigcode;
	char canary[sizeof(long) * 8];

	p = imgp->proc;
	sysent = p->p_sysent;

	destp = PROC_PS_STRINGS(p);
	arginfo = imgp->ps_strings = (void *)destp;

	/*
	 * Install sigcode.
	 */
	if (sysent->sv_shared_page_base == 0 && sysent->sv_szsigcode != NULL) {
		szsigcode = *(sysent->sv_szsigcode);
		destp -= szsigcode;
		destp = rounddown2(destp, sizeof(void *));
		error = copyout(sysent->sv_sigcode, (void *)destp, szsigcode);
		if (error != 0)
			return (error);
	}

	/*
	 * Copy the image path for the rtld.
	 */
	if (imgp->execpath != NULL && imgp->auxargs != NULL) {
		execpath_len = strlen(imgp->execpath) + 1;
		destp -= execpath_len;
		destp = rounddown2(destp, sizeof(void *));
		imgp->execpathp = (void *)destp;
		error = copyout(imgp->execpath, imgp->execpathp, execpath_len);
		if (error != 0)
			return (error);
	}

	/*
	 * Prepare the canary for SSP.
	 */
	arc4rand(canary, sizeof(canary), 0);
	destp -= sizeof(canary);
	imgp->canary = (void *)destp;
	error = copyout(canary, imgp->canary, sizeof(canary));
	if (error != 0)
		return (error);
	imgp->canarylen = sizeof(canary);

	/*
	 * Prepare the pagesizes array.
	 */
	imgp->pagesizeslen = sizeof(pagesizes[0]) * MAXPAGESIZES;
	destp -= imgp->pagesizeslen;
	destp = rounddown2(destp, sizeof(void *));
	imgp->pagesizes = (void *)destp;
	error = copyout(pagesizes, imgp->pagesizes, imgp->pagesizeslen);
	if (error != 0)
		return (error);

	/*
	 * Allocate room for the argument and environment strings.
	 */
	destp -= ARG_MAX - imgp->args->stringspace;
	destp = rounddown2(destp, sizeof(void *));
	ustringp = destp;

	if (imgp->auxargs) {
		/*
		 * Allocate room on the stack for the ELF auxargs
		 * array.  It has up to AT_COUNT entries.
		 */
		destp -= AT_COUNT * sizeof(Elf_Auxinfo);
		destp = rounddown2(destp, sizeof(void *));
	}

	vectp = (char **)destp;

	/*
	 * Allocate room for the argv[] and env vectors including the
	 * terminating NULL pointers.
	 */
	vectp -= imgp->args->argc + 1 + imgp->args->envc + 1;

	/*
	 * vectp also becomes our initial stack base
	 */
	*stack_base = (uintptr_t)vectp;

	stringp = imgp->args->begin_argv;
	argc = imgp->args->argc;
	envc = imgp->args->envc;

	/*
	 * Copy out strings - arguments and environment.
	 */
	error = copyout(stringp, (void *)ustringp,
	    ARG_MAX - imgp->args->stringspace);
	if (error != 0)
		return (error);

	/*
	 * Fill in "ps_strings" struct for ps, w, etc.
	 */
	imgp->argv = vectp;
	if (suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp) != 0 ||
	    suword32(&arginfo->ps_nargvstr, argc) != 0)
		return (EFAULT);

	/*
	 * Fill in argument portion of vector table.
	 */
	for (; argc > 0; --argc) {
		if (suword(vectp++, ustringp) != 0)
			return (EFAULT);
		while (*stringp++ != 0)
			ustringp++;
		ustringp++;
	}

	/* a null vector table pointer separates the argp's from the envp's */
	if (suword(vectp++, 0) != 0)
		return (EFAULT);

	imgp->envv = vectp;
	if (suword(&arginfo->ps_envstr, (long)(intptr_t)vectp) != 0 ||
	    suword32(&arginfo->ps_nenvstr, envc) != 0)
		return (EFAULT);

	/*
	 * Fill in environment portion of vector table.
	 */
	for (; envc > 0; --envc) {
		if (suword(vectp++, ustringp) != 0)
			return (EFAULT);
		while (*stringp++ != 0)
			ustringp++;
		ustringp++;
	}

	/* end of vector table is a null pointer */
	if (suword(vectp, 0) != 0)
		return (EFAULT);

	if (imgp->auxargs) {
		vectp++;
		error = imgp->sysent->sv_copyout_auxargs(imgp,
		    (uintptr_t)vectp);
		if (error != 0)
			return (error);
	}

	return (0);
}

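/*
 * The region built above sits just below PROC_PS_STRINGS(p) and, from
 * higher to lower addresses, roughly contains (a sketch; several items
 * are optional):
 *
 *	ps_strings structure
 *	signal trampoline	(only if there is no shared page)
 *	execpath		(only if auxargs are present)
 *	SSP canary
 *	pagesizes array
 *	argument and environment strings
 *	ELF auxargs array	(only if auxargs are present)
 *	envv[] pointers, NULL-terminated
 *	argv[] pointers, NULL-terminated	<- *stack_base
 */
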
/*
 * Check permissions of file to execute.
 * Called with imgp->vp locked.
 * Return 0 for success or error code on failure.
 */
int
exec_check_permissions(struct image_params *imgp)
{
	struct vnode *vp = imgp->vp;
	struct vattr *attr = imgp->attr;
	struct thread *td;
	int error;

	td = curthread;

	/* Get file attributes */
	error = VOP_GETATTR(vp, attr, td->td_ucred);
	if (error)
		return (error);

#ifdef MAC
	error = mac_vnode_check_exec(td->td_ucred, imgp->vp, imgp);
	if (error)
		return (error);
#endif

	/*
	 * 1) Check if file execution is disabled for the filesystem that
	 *    this file resides on.
	 * 2) Ensure that at least one execute bit is on.  Otherwise, a
	 *    privileged user will always succeed, and we don't want this
	 *    to happen unless the file really is executable.
	 * 3) Ensure that the file is a regular file.
	 */
	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    (attr->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0 ||
	    (attr->va_type != VREG))
		return (EACCES);

	/*
	 * Zero length files can't be exec'd
	 */
	if (attr->va_size == 0)
		return (ENOEXEC);

	/*
	 * Check for execute permission to file based on current credentials.
	 */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		return (error);

	/*
	 * Check number of open-for-writes on the file and deny execution
	 * if there are any.
	 *
	 * Add a text reference now so no one can write to the
	 * executable while we're activating it.
	 *
	 * Remember if this was set before and unset it in case this is not
	 * actually an executable image.
	 */
	error = VOP_SET_TEXT(vp);
	if (error != 0)
		return (error);
	imgp->textset = true;

	/*
	 * Call filesystem specific open routine (which does nothing in the
	 * general case).
	 */
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error == 0)
		imgp->opened = true;
	return (error);
}

/*
 * Exec handler registration
 */
int
exec_register(const struct execsw *execsw_arg)
{
	const struct execsw **es, **xs, **newexecsw;
	u_int count = 2;	/* New slot and trailing NULL */

	if (execsw)
		for (es = execsw; *es; es++)
			count++;
	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
	xs = newexecsw;
	if (execsw)
		for (es = execsw; *es; es++)
			*xs++ = *es;
	*xs++ = execsw_arg;
	*xs = NULL;
	if (execsw)
		free(execsw, M_TEMP);
	execsw = newexecsw;
	return (0);
}

int
exec_unregister(const struct execsw *execsw_arg)
{
	const struct execsw **es, **xs, **newexecsw;
	int count = 1;

	if (execsw == NULL)
		panic("unregister with no handlers left?\n");

	for (es = execsw; *es; es++) {
		if (*es == execsw_arg)
			break;
	}
	if (*es == NULL)
		return (ENOENT);
	for (es = execsw; *es; es++)
		if (*es != execsw_arg)
			count++;
	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
	xs = newexecsw;
	for (es = execsw; *es; es++)
		if (*es != execsw_arg)
			*xs++ = *es;
	*xs = NULL;
	if (execsw)
		free(execsw, M_TEMP);
	execsw = newexecsw;
	return (0);
}

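/*
 * Example registration (a sketch with hypothetical names): in-tree
 * image activators typically register through the EXEC_SET() macro,
 * which arranges for exec_register()/exec_unregister() to be called
 * on module load and unload:
 *
 *	static struct execsw myfmt_execsw = {
 *		.ex_imgact = exec_myfmt_imgact,
 *		.ex_name = "myfmt"
 *	};
 *	EXEC_SET(myfmt, myfmt_execsw);
 */
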
/*
 * Write out a core segment to the compression stream.
 */
static int
compress_chunk(struct coredump_params *cp, char *base, char *buf, size_t len)
{
	size_t chunk_len;
	int error;

	error = 0;
	while (len > 0) {
		chunk_len = MIN(len, CORE_BUF_SIZE);

		/*
		 * We can get EFAULT error here.
		 * In that case zero out the current chunk of the segment.
		 */
		error = copyin(base, buf, chunk_len);
		if (error != 0)
			bzero(buf, chunk_len);
		error = compressor_write(cp->comp, buf, chunk_len);
		if (error != 0)
			break;
		base += chunk_len;
		len -= chunk_len;
	}
	return (error);
}

int
core_write(struct coredump_params *cp, const void *base, size_t len,
    off_t offset, enum uio_seg seg, size_t *resid)
{
	return ((*cp->cdw->write_fn)(cp->cdw, base, len, offset, seg,
	    cp->active_cred, resid, cp->td));
}

static int
core_extend(struct coredump_params *cp, off_t newsz)
{
	return ((*cp->cdw->extend_fn)(cp->cdw, newsz, cp->active_cred));
}

int
core_output(char *base, size_t len, off_t offset, struct coredump_params *cp,
    void *tmpbuf)
{
	vm_map_t map;
	size_t resid, runlen;
	int error;
	bool success;

	KASSERT((uintptr_t)base % PAGE_SIZE == 0,
	    ("%s: user address %p is not page-aligned", __func__, base));

	if (cp->comp != NULL)
		return (compress_chunk(cp, base, tmpbuf, len));

	error = 0;
	map = &cp->td->td_proc->p_vmspace->vm_map;
	for (; len > 0; base += runlen, offset += runlen, len -= runlen) {
		/*
		 * Attempt to page in all virtual pages in the range.  If a
		 * virtual page is not backed by the pager, it is represented as
		 * a hole in the file.  This can occur with zero-filled
		 * anonymous memory or truncated files, for example.
		 */
		for (runlen = 0; runlen < len; runlen += PAGE_SIZE) {
			if (core_dump_can_intr && curproc_sigkilled())
				return (EINTR);
			error = vm_fault(map, (uintptr_t)base + runlen,
			    VM_PROT_READ, VM_FAULT_NOFILL, NULL);
			if (runlen == 0)
				success = error == KERN_SUCCESS;
			else if ((error == KERN_SUCCESS) != success)
				break;
		}

		if (success) {
			error = core_write(cp, base, runlen, offset,
			    UIO_USERSPACE, &resid);
			if (error != 0) {
				if (error != EFAULT)
					break;

				/*
				 * EFAULT may be returned if the user mapping
				 * could not be accessed, e.g., because a mapped
				 * file has been truncated.  Skip the page if no
				 * progress was made, to protect against a
				 * hypothetical scenario where vm_fault() was
				 * successful but core_write() returns EFAULT
				 * anyway.
				 */
				runlen -= resid;
				if (runlen == 0) {
					success = false;
					runlen = PAGE_SIZE;
				}
			}
		}
		if (!success) {
			error = core_extend(cp, offset + runlen);
			if (error != 0)
				break;
		}
	}
	return (error);
}

/*
 * Drain into a core file.
 */
int
sbuf_drain_core_output(void *arg, const char *data, int len)
{
	struct coredump_params *cp;
	struct proc *p;
	int error, locked;

	cp = arg;
	p = cp->td->td_proc;

	/*
	 * Some kern_proc out routines that print to this sbuf may
	 * call us with the process lock held.  Draining with the
	 * non-sleepable lock held is unsafe.  The lock is needed for
	 * those routines when dumping a live process.  In our case we
	 * can safely release the lock before draining and acquire
	 * again after.
	 */
	locked = PROC_LOCKED(p);
	if (locked)
		PROC_UNLOCK(p);
	if (cp->comp != NULL)
		error = compressor_write(cp->comp, __DECONST(char *, data),
		    len);
	else
		error = core_write(cp, __DECONST(void *, data), len, cp->offset,
		    UIO_SYSSPACE, NULL);
	if (locked)
		PROC_LOCK(p);
	if (error != 0)
		return (-error);
	cp->offset += len;
	return (len);
}