GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/kern/kern_exit.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/capsicum.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/procdesc.h>
#include <sys/jail.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/sdt.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/sysent.h>
#include <sys/timers.h>
#include <sys/umtxvar.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
dtrace_execexit_func_t	dtrace_fasttrap_exit;
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE1(proc, , , exit, "int");

static int kern_kill_on_dbg_exit = 1;
SYSCTL_INT(_kern, OID_AUTO, kill_on_debugger_exit, CTLFLAG_RWTUN,
    &kern_kill_on_dbg_exit, 0,
    "Kill ptraced processes when debugger exits");

static bool kern_wait_dequeue_sigchld = 1;
SYSCTL_BOOL(_kern, OID_AUTO, wait_dequeue_sigchld, CTLFLAG_RWTUN,
    &kern_wait_dequeue_sigchld, 0,
    "Dequeue SIGCHLD on wait(2) for live process");

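/*
 * Illustrative userspace sketch (not part of the kernel build): the two
 * tunables above are exported as kern.kill_on_debugger_exit and
 * kern.wait_dequeue_sigchld and can be read with sysctlbyname(3).  The
 * value sizes below are assumed to match the int/bool declarations above.
 */
#if 0	/* userspace example, not compiled with the kernel */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	int kill_on_dbg_exit;
	bool dequeue_sigchld;
	size_t len;

	/* Matches SYSCTL_INT(_kern, OID_AUTO, kill_on_debugger_exit, ...). */
	len = sizeof(kill_on_dbg_exit);
	if (sysctlbyname("kern.kill_on_debugger_exit", &kill_on_dbg_exit,
	    &len, NULL, 0) == -1)
		err(1, "sysctlbyname");

	/* Matches SYSCTL_BOOL(_kern, OID_AUTO, wait_dequeue_sigchld, ...). */
	len = sizeof(dequeue_sigchld);
	if (sysctlbyname("kern.wait_dequeue_sigchld", &dequeue_sigchld,
	    &len, NULL, 0) == -1)
		err(1, "sysctlbyname");

	printf("kill_on_debugger_exit=%d wait_dequeue_sigchld=%d\n",
	    kill_on_dbg_exit, (int)dequeue_sigchld);
	return (0);
}
#endif
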
struct proc *
proc_realparent(struct proc *child)
{
	struct proc *p, *parent;

	sx_assert(&proctree_lock, SX_LOCKED);
	if ((child->p_treeflag & P_TREE_ORPHANED) == 0)
		return (child->p_pptr->p_pid == child->p_oppid ?
		    child->p_pptr : child->p_reaper);
	for (p = child; (p->p_treeflag & P_TREE_FIRST_ORPHAN) == 0;) {
		/* Cannot use LIST_PREV(), since the list head is not known. */
		p = __containerof(p->p_orphan.le_prev, struct proc,
		    p_orphan.le_next);
		KASSERT((p->p_treeflag & P_TREE_ORPHANED) != 0,
		    ("missing P_ORPHAN %p", p));
	}
	parent = __containerof(p->p_orphan.le_prev, struct proc,
	    p_orphans.lh_first);
	return (parent);
}

void
reaper_abandon_children(struct proc *p, bool exiting)
{
	struct proc *p1, *p2, *ptmp;

	sx_assert(&proctree_lock, SX_XLOCKED);
	KASSERT(p != initproc, ("reaper_abandon_children for initproc"));
	if ((p->p_treeflag & P_TREE_REAPER) == 0)
		return;
	p1 = p->p_reaper;
	LIST_FOREACH_SAFE(p2, &p->p_reaplist, p_reapsibling, ptmp) {
		LIST_REMOVE(p2, p_reapsibling);
		p2->p_reaper = p1;
		p2->p_reapsubtree = p->p_reapsubtree;
		LIST_INSERT_HEAD(&p1->p_reaplist, p2, p_reapsibling);
		if (exiting && p2->p_pptr == p) {
			PROC_LOCK(p2);
			proc_reparent(p2, p1, true);
			PROC_UNLOCK(p2);
		}
	}
	KASSERT(LIST_EMPTY(&p->p_reaplist), ("p_reaplist not empty"));
	p->p_treeflag &= ~P_TREE_REAPER;
}
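
/*
 * Illustrative userspace sketch (not part of the kernel build): the reaper
 * lists that reaper_abandon_children() maintains are driven from userspace
 * via procctl(2).  A process that acquires reaper status has orphaned
 * descendants reparented to it instead of to init(8), so it can wait(2)
 * for them.  The struct and flag names below follow procctl(2) as recalled
 * and should be treated as assumptions to verify against the header.
 */
#if 0	/* userspace example, not compiled with the kernel */
#include <sys/types.h>
#include <sys/procctl.h>
#include <sys/wait.h>

#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct procctl_reaper_status st;
	pid_t child, reaped;

	/* Become the reaper for all current and future descendants. */
	if (procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL) == -1)
		err(1, "PROC_REAP_ACQUIRE");

	child = fork();
	if (child == -1)
		err(1, "fork");
	if (child == 0) {
		/* The grandchild outlives its parent and becomes our orphan. */
		if (fork() == 0) {
			sleep(1);
			_exit(7);
		}
		_exit(0);
	}

	if (procctl(P_PID, getpid(), PROC_REAP_STATUS, &st) == -1)
		err(1, "PROC_REAP_STATUS");
	printf("descendants under this reaper: %u\n", st.rs_descendants);

	/* Collect both the child and the reparented grandchild. */
	while ((reaped = wait(NULL)) != -1)
		printf("reaped pid %d\n", (int)reaped);
	return (0);
}
#endif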

static void
reaper_clear(struct proc *p)
{
	struct proc *p1;
	bool clear;

	sx_assert(&proctree_lock, SX_LOCKED);
	LIST_REMOVE(p, p_reapsibling);
	if (p->p_reapsubtree == 1)
		return;
	clear = true;
	LIST_FOREACH(p1, &p->p_reaper->p_reaplist, p_reapsibling) {
		if (p1->p_reapsubtree == p->p_reapsubtree) {
			clear = false;
			break;
		}
	}
	if (clear)
		proc_id_clear(PROC_ID_REAP, p->p_reapsubtree);
}

void
proc_clear_orphan(struct proc *p)
{
	struct proc *p1;

	sx_assert(&proctree_lock, SA_XLOCKED);
	if ((p->p_treeflag & P_TREE_ORPHANED) == 0)
		return;
	if ((p->p_treeflag & P_TREE_FIRST_ORPHAN) != 0) {
		p1 = LIST_NEXT(p, p_orphan);
		if (p1 != NULL)
			p1->p_treeflag |= P_TREE_FIRST_ORPHAN;
		p->p_treeflag &= ~P_TREE_FIRST_ORPHAN;
	}
	LIST_REMOVE(p, p_orphan);
	p->p_treeflag &= ~P_TREE_ORPHANED;
}

void
exit_onexit(struct proc *p)
{
	MPASS(p->p_numthreads == 1);
	umtx_thread_exit(FIRST_THREAD_IN_PROC(p));
}

/*
 * exit -- death of process.
 */
int
sys__exit(struct thread *td, struct _exit_args *uap)
{

	exit1(td, uap->rval, 0);
	__unreachable();
}
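
/*
 * Illustrative userspace sketch (not part of the kernel build): exit1()
 * records the _exit(2) argument in p_xexit; the parent sees it again via
 * WEXITSTATUS(), which under the traditional status encoding keeps only
 * the low 8 bits of the value passed to _exit().
 */
#if 0	/* userspace example, not compiled with the kernel */
#include <sys/types.h>
#include <sys/wait.h>

#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	pid_t pid;
	int status;

	pid = fork();
	if (pid == -1)
		err(1, "fork");
	if (pid == 0)
		_exit(300);	/* parent observes 300 & 0xff == 44 */
	if (waitpid(pid, &status, 0) == -1)
		err(1, "waitpid");
	if (WIFEXITED(status))
		printf("exit status %d\n", WEXITSTATUS(status));
	return (0);
}
#endif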

void
proc_set_p2_wexit(struct proc *p)
{
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag2 |= P2_WEXIT;
}

/*
 * Exit: deallocate address space and other resources, change proc state to
 * zombie, and unlink proc from allproc and parent's lists.  Save exit status
 * and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct thread *td, int rval, int signo)
{
	struct proc *p, *nq, *q, *t;
	struct thread *tdt;
	ksiginfo_t *ksi, *ksi1;
	int signal_parent;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT(rval == 0 || signo == 0, ("exit1 rv %d sig %d", rval, signo));
	TSPROCEXIT(td->td_proc->p_pid);

	p = td->td_proc;
	/*
	 * In case we're rebooting we just let init die in order to
	 * work around an issue where pid 1 might get a fatal signal.
	 * For instance, if the network interface serving the NFS root
	 * is going down due to the reboot, page-in requests for text
	 * fail.
	 */
	if (p == initproc && rebooting == 0) {
		printf("init died (signal %d, exit %d)\n", signo, rval);
		panic("Going nowhere without my init!");
	}

	/*
	 * Process deferred operations, designated with ASTF_KCLEAR.
	 * For instance, we need to deref SU mp, since the thread does
	 * not return to userspace, and wait for geom to stabilize.
	 */
	ast_kclear(td);

	/*
	 * MUST abort all other threads before proceeding past here.
	 */
	PROC_LOCK(p);
	proc_set_p2_wexit(p);

	/*
	 * First check if some other thread or external request got
	 * here before us.  If so, act appropriately: exit or suspend.
	 * We must ensure that stop requests are handled before we set
	 * P_WEXIT.
	 */
	thread_suspend_check(0);
	while (p->p_flag & P_HADTHREADS) {
		/*
		 * Kill off the other threads.  This requires
		 * some co-operation from other parts of the kernel
		 * so it may not be instantaneous.  With this state set
		 * any thread attempting to interruptibly
		 * sleep will return immediately with EINTR or EWOULDBLOCK
		 * which will hopefully force them to back out to userland
		 * freeing resources as they go.  Any thread attempting
		 * to return to userland will thread_exit() from ast().
		 * thread_exit() will unsuspend us when the last of the
		 * other threads exits.
		 * If another thread is already single-threading the
		 * process after resumption, calling thread_single() will
		 * fail; in that case, we just re-check all suspension
		 * requests, and the thread should either be suspended
		 * there or exit.
		 */
		if (!thread_single(p, SINGLE_EXIT))
			/*
			 * All other activity in this process is now
			 * stopped.  Threading support has been turned
			 * off.
			 */
			break;
		/*
		 * Recheck for new stop or suspend requests which
		 * might appear while process lock was dropped in
		 * thread_single().
		 */
		thread_suspend_check(0);
	}
	KASSERT(p->p_numthreads == 1,
	    ("exit1: proc %p exiting with %d threads", p, p->p_numthreads));
	racct_sub(p, RACCT_NTHR, 1);

	/* Let event handler change exit status */
	p->p_xexit = rval;
	p->p_xsig = signo;

	/*
	 * Ignore any pending request to stop due to a stop signal.
	 * Once P_WEXIT is set, future requests will be ignored as
	 * well.
	 */
	p->p_flag &= ~P_STOPPED_SIG;
	KASSERT(!P_SHOULDSTOP(p), ("exiting process is stopped"));

	/* Note that we are exiting. */
	p->p_flag |= P_WEXIT;

	/*
	 * Wait for any processes that have a hold on our vmspace to
	 * release their reference.
	 */
	while (p->p_lock > 0)
		msleep(&p->p_lock, &p->p_mtx, PWAIT, "exithold", 0);

	PROC_UNLOCK(p);
	/* Drain the limit callout while we don't have the proc locked */
	callout_drain(&p->p_limco);

#ifdef AUDIT
	/*
	 * The Sun BSM exit token contains two components: an exit status as
	 * passed to exit(), and a return value to indicate what sort of exit
	 * it was.  The exit status is WEXITSTATUS(rv), but it's not clear
	 * what the return value is.
	 */
	AUDIT_ARG_EXIT(rval, 0);
	AUDIT_SYSCALL_EXIT(0, td);
#endif

	/* Are we a task leader with peers? */
	if (p->p_peers != NULL && p == p->p_leader) {
		mtx_lock(&ppeers_lock);
		q = p->p_peers;
		while (q != NULL) {
			PROC_LOCK(q);
			kern_psignal(q, SIGKILL);
			PROC_UNLOCK(q);
			q = q->p_peers;
		}
		while (p->p_peers != NULL)
			msleep(p, &ppeers_lock, PWAIT, "exit1", 0);
		mtx_unlock(&ppeers_lock);
	}

	itimers_exit(p);

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * E.g. SYSV IPC stuff.
	 * Event handler could change exit status.
	 * XXX what if one of these generates an error?
	 */
	EVENTHANDLER_DIRECT_INVOKE(process_exit, p);

	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	PROC_LOCK(p);
	stopprofclock(p);
	p->p_ptevents = 0;

	/*
	 * Stop the real interval timer.  If the handler is currently
	 * executing, prevent it from rearming itself and let it finish.
	 */
	p->p_flag2 &= ~P2_ITSTOPPED;
	if (timevalisset(&p->p_realtimer.it_value) &&
	    callout_stop(&p->p_itcallout) == 0) {
		timevalclear(&p->p_realtimer.it_interval);
		PROC_UNLOCK(p);
		callout_drain(&p->p_itcallout);
	} else {
		PROC_UNLOCK(p);
	}

	if (p->p_sysent->sv_onexit != NULL)
		p->p_sysent->sv_onexit(p);
	seltdfini(td);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.  The P_WEXIT flag interlocks with fsetown().
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	pdescfree(td);
	fdescfree(td);

	/*
	 * Remove ourself from our leader's peer list and wake our leader.
	 */
	if (p->p_leader->p_peers != NULL) {
		mtx_lock(&ppeers_lock);
		if (p->p_leader->p_peers != NULL) {
			q = p->p_leader;
			while (q->p_peers != p)
				q = q->p_peers;
			q->p_peers = p->p_peers;
			wakeup(p->p_leader);
		}
		mtx_unlock(&ppeers_lock);
	}

	exec_free_abi_mappings(p);
	vmspace_exit(td);
	(void)acct_process(td);

#ifdef KTRACE
	ktrprocexit(td);
#endif
	/*
	 * Release reference to text vnode etc
	 */
	if (p->p_textvp != NULL) {
		vrele(p->p_textvp);
		p->p_textvp = NULL;
	}
	if (p->p_textdvp != NULL) {
		vrele(p->p_textdvp);
		p->p_textdvp = NULL;
	}
	if (p->p_binname != NULL) {
		free(p->p_binname, M_PARGS);
		p->p_binname = NULL;
	}

	/*
	 * Release our limits structure.
	 */
	lim_free(p->p_limit);
	p->p_limit = NULL;

	tidhash_remove(td);

	/*
	 * Call machine-dependent code to release any
	 * machine-dependent resources other than the address space.
	 * The address space is released by "vmspace_exitfree(p)" in
	 * vm_waitproc().
	 */
	cpu_exit(td);

	WITNESS_WARN(WARN_PANIC, NULL, "process (pid %d) exiting", p->p_pid);

	/*
	 * Remove from allproc. It still sits in the hash.
	 */
	sx_xlock(&allproc_lock);
	LIST_REMOVE(p, p_list);

#ifdef DDB
	/*
	 * Used by ddb's 'ps' command to find this process via the
	 * pidhash.
	 */
	p->p_list.le_prev = NULL;
#endif
	prison_proc_unlink(p->p_ucred->cr_prison, p);
	sx_xunlock(&allproc_lock);

	sx_xlock(&proctree_lock);
	if ((p->p_flag & (P_TRACED | P_PPWAIT | P_PPTRACE)) != 0) {
		PROC_LOCK(p);
		p->p_flag &= ~(P_TRACED | P_PPWAIT | P_PPTRACE);
		PROC_UNLOCK(p);
	}

	/*
	 * killjobc() might drop and re-acquire proctree_lock to
	 * revoke control tty if exiting process was a session leader.
	 */
	killjobc();

	/*
	 * Reparent all child processes:
	 * - traced ones to the original parent (or init if we are that parent)
	 * - the rest to init
	 */
	q = LIST_FIRST(&p->p_children);
	if (q != NULL)		/* only need this if any child is S_ZOMB */
		wakeup(q->p_reaper);
	for (; q != NULL; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		ksi = ksiginfo_alloc(M_WAITOK);
		PROC_LOCK(q);
		q->p_sigparent = SIGCHLD;

		if ((q->p_flag & P_TRACED) == 0) {
			proc_reparent(q, q->p_reaper, true);
			if (q->p_state == PRS_ZOMBIE) {
				/*
				 * Inform reaper about the reparented
				 * zombie, since wait(2) has something
				 * new to report.  Guarantee queueing
				 * of the SIGCHLD signal, similar to
				 * the _exit() behaviour, by providing
				 * our ksiginfo.  Ksi is freed by the
				 * signal delivery.
				 */
				if (q->p_ksi == NULL) {
					ksi1 = NULL;
				} else {
					ksiginfo_copy(q->p_ksi, ksi);
					ksi->ksi_flags |= KSI_INS;
					ksi1 = ksi;
					ksi = NULL;
				}
				PROC_LOCK(q->p_reaper);
				pksignal(q->p_reaper, SIGCHLD, ksi1);
				PROC_UNLOCK(q->p_reaper);
			} else if (q->p_pdeathsig > 0) {
				/*
				 * The child asked to receive a signal
				 * when we exit.
				 */
				kern_psignal(q, q->p_pdeathsig);
			}
		} else {
			/*
			 * Traced processes are killed by default
			 * since their existence means someone is
			 * screwing up.
			 */
			t = proc_realparent(q);
			if (t == p) {
				proc_reparent(q, q->p_reaper, true);
			} else {
				PROC_LOCK(t);
				proc_reparent(q, t, true);
				PROC_UNLOCK(t);
			}
			/*
			 * Since q was found on our children list, the
			 * proc_reparent() call moved q to the orphan
			 * list due to the present P_TRACED flag.  Clear
			 * the orphan link for q now while q is locked.
			 */
			proc_clear_orphan(q);
			q->p_flag &= ~P_TRACED;
			q->p_flag2 &= ~P2_PTRACE_FSTP;
			q->p_ptevents = 0;
			p->p_xthread = NULL;
			FOREACH_THREAD_IN_PROC(q, tdt) {
				tdt->td_dbgflags &= ~(TDB_SUSPEND | TDB_XSIG |
				    TDB_FSTP);
				tdt->td_xsig = 0;
			}
			if (kern_kill_on_dbg_exit) {
				q->p_flag &= ~P_STOPPED_TRACE;
				kern_psignal(q, SIGKILL);
			} else if ((q->p_flag & (P_STOPPED_TRACE |
			    P_STOPPED_SIG)) != 0) {
				sigqueue_delete_proc(q, SIGTRAP);
				ptrace_unsuspend(q);
			}
		}
		PROC_UNLOCK(q);
		if (ksi != NULL)
			ksiginfo_free(ksi);
	}

	/*
	 * Also get rid of our orphans.
	 */
	while ((q = LIST_FIRST(&p->p_orphans)) != NULL) {
		PROC_LOCK(q);
		KASSERT(q->p_oppid == p->p_pid,
		    ("orphan %p of %p has unexpected oppid %d", q, p,
		    q->p_oppid));
		q->p_oppid = q->p_reaper->p_pid;

		/*
		 * If we are the real parent of this process
		 * but it has been reparented to a debugger, then
		 * check if it asked for a signal when we exit.
		 */
		if (q->p_pdeathsig > 0)
			kern_psignal(q, q->p_pdeathsig);
		CTR2(KTR_PTRACE, "exit: pid %d, clearing orphan %d", p->p_pid,
		    q->p_pid);
		proc_clear_orphan(q);
		PROC_UNLOCK(q);
	}

#ifdef KDTRACE_HOOKS
	if (SDT_PROBES_ENABLED()) {
		int reason = CLD_EXITED;
		if (WCOREDUMP(signo))
			reason = CLD_DUMPED;
		else if (WIFSIGNALED(signo))
			reason = CLD_KILLED;
		SDT_PROBE1(proc, , , exit, reason);
	}
#endif

	/* Save exit status. */
	PROC_LOCK(p);
	p->p_xthread = td;

	if (p->p_sysent->sv_ontdexit != NULL)
		p->p_sysent->sv_ontdexit(td);

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the exit if it
	 * has declared an interest.
	 */
	if (dtrace_fasttrap_exit)
		dtrace_fasttrap_exit(p);
#endif

	/*
	 * Notify interested parties of our demise.
	 */
	KNOTE_LOCKED(p->p_klist, NOTE_EXIT);

	/*
	 * If this is a process with a descriptor, we may not need to deliver
	 * a signal to the parent.  proctree_lock is held over
	 * procdesc_exit() to serialize concurrent calls to close() and
	 * exit().
	 */
	signal_parent = 0;
	if (p->p_procdesc == NULL || procdesc_exit(p)) {
		/*
		 * Notify parent that we're gone.  If parent has the
		 * PS_NOCLDWAIT flag set, or if the handler is set to SIG_IGN,
		 * notify process 1 instead (and hope it will handle this
		 * situation).
		 */
		PROC_LOCK(p->p_pptr);
		mtx_lock(&p->p_pptr->p_sigacts->ps_mtx);
		if (p->p_pptr->p_sigacts->ps_flag &
		    (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
			struct proc *pp;

			mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);
			pp = p->p_pptr;
			PROC_UNLOCK(pp);
			proc_reparent(p, p->p_reaper, true);
			p->p_sigparent = SIGCHLD;
			PROC_LOCK(p->p_pptr);

			/*
			 * Notify the old parent so that, if it was blocked
			 * in wait(2) or waitpid(2) for our pid, it will
			 * continue.
			 */
			wakeup(pp);
		} else
			mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);

		if (p->p_pptr == p->p_reaper || p->p_pptr == initproc) {
			signal_parent = 1;
		} else if (p->p_sigparent != 0) {
			if (p->p_sigparent == SIGCHLD) {
				signal_parent = 1;
			} else { /* LINUX thread */
				signal_parent = 2;
			}
		}
	} else
		PROC_LOCK(p->p_pptr);
	sx_xunlock(&proctree_lock);

	if (signal_parent == 1) {
		childproc_exited(p);
	} else if (signal_parent == 2) {
		kern_psignal(p->p_pptr, p->p_sigparent);
	}

	/* Tell the prison that we are gone. */
	prison_proc_free(p->p_ucred->cr_prison);

	/*
	 * The PRS_ZOMBIE state prevents other processes from sending
	 * signals to the process; to avoid a memory leak, we free the
	 * signal queue memory at the time the state is set.
	 */
	sigqueue_flush(&p->p_sigqueue);
	sigqueue_flush(&td->td_sigqueue);

	/*
	 * We have to wait until after acquiring all locks before
	 * changing p_state.  We need to avoid all possible context
	 * switches (including ones from blocking on a mutex) while
	 * marked as a zombie.  We also have to set the zombie state
	 * before we release the parent process' proc lock to avoid
	 * a lost wakeup.  So, we first call wakeup, then we grab the
	 * sched lock, update the state, and release the parent process'
	 * proc lock.
	 */
	wakeup(p->p_pptr);
	cv_broadcast(&p->p_pwait);
	sched_exit(p->p_pptr, td);
	PROC_SLOCK(p);
	p->p_state = PRS_ZOMBIE;
	PROC_UNLOCK(p->p_pptr);

	/*
	 * Save our children's rusage information in our exit rusage.
	 */
	PROC_STATLOCK(p);
	ruadd(&p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);
	PROC_STATUNLOCK(p);

	/*
	 * Make sure the scheduler takes this thread out of its tables etc.
	 * This will also release this thread's reference to the ucred.
	 * Other thread parts to release include pcb bits and such.
	 */
	thread_exit();
}
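
/*
 * Illustrative userspace sketch (not part of the kernel build): exit1()
 * ends with the parent being notified via SIGCHLD (childproc_exited()
 * above), carrying the same exit information that later fills the wait(2)
 * status.  A parent can observe si_pid/si_code/si_status directly by
 * blocking SIGCHLD and fetching it with sigwaitinfo(2).
 */
#if 0	/* userspace example, not compiled with the kernel */
#include <sys/types.h>
#include <sys/wait.h>

#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	sigset_t set;
	siginfo_t si;
	pid_t pid;

	sigemptyset(&set);
	sigaddset(&set, SIGCHLD);
	if (sigprocmask(SIG_BLOCK, &set, NULL) == -1)
		err(1, "sigprocmask");

	pid = fork();
	if (pid == -1)
		err(1, "fork");
	if (pid == 0)
		_exit(3);

	/* Wait synchronously for the queued SIGCHLD instead of a handler. */
	if (sigwaitinfo(&set, &si) == -1)
		err(1, "sigwaitinfo");
	printf("SIGCHLD from pid %d: si_code=%d si_status=%d\n",
	    (int)si.si_pid, si.si_code, si.si_status);

	/* Reap the zombie; see kern_wait()/proc_reap() below. */
	if (waitpid(pid, NULL, 0) == -1)
		err(1, "waitpid");
	return (0);
}
#endif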

#ifndef _SYS_SYSPROTO_H_
struct abort2_args {
	char *why;
	int nargs;
	void **args;
};
#endif

int
sys_abort2(struct thread *td, struct abort2_args *uap)
{
	void *uargs[16];
	void **uargsp;
	int error, nargs;

	nargs = uap->nargs;
	if (nargs < 0 || nargs > nitems(uargs))
		nargs = -1;
	uargsp = NULL;
	if (nargs > 0) {
		if (uap->args != NULL) {
			error = copyin(uap->args, uargs,
			    nargs * sizeof(void *));
			if (error != 0)
				nargs = -1;
			else
				uargsp = uargs;
		} else
			nargs = -1;
	}
	return (kern_abort2(td, uap->why, nargs, uargsp));
}

/*
 * kern_abort2()
 * Arguments:
 *  why - user pointer to why
 *  nargs - number of arguments copied or -1 if an error occurred in copying
 *  args - pointer to an array of pointers in kernel format
 */
int
kern_abort2(struct thread *td, const char *why, int nargs, void **uargs)
{
	struct proc *p = td->td_proc;
	struct sbuf *sb;
	int error, i, sig;

	/*
	 * Do it right now so we can log either a proper call of abort2()
	 * or note that an invalid argument was passed.  512 is big enough
	 * to handle 16 arguments' descriptions with additional comments.
	 */
	sb = sbuf_new(NULL, NULL, 512, SBUF_FIXEDLEN);
	sbuf_clear(sb);
	sbuf_printf(sb, "%s(pid %d uid %d) aborted: ",
	    p->p_comm, p->p_pid, td->td_ucred->cr_uid);
	/*
	 * Since we can't return from abort2(), send SIGKILL in cases
	 * where abort2() was called improperly.
	 */
	sig = SIGKILL;
	/* Prevent DoS from user space. */
	if (nargs == -1)
		goto out;
	KASSERT(nargs >= 0 && nargs <= 16, ("called with too many args (%d)",
	    nargs));
	/*
	 * Limit the size of the 'reason' string to 128.  It will fit
	 * even when the maximal number of arguments is logged.
	 */
	if (why != NULL) {
		error = sbuf_copyin(sb, why, 128);
		if (error < 0)
			goto out;
	} else {
		sbuf_cat(sb, "(null)");
	}
	if (nargs > 0) {
		sbuf_putc(sb, '(');
		for (i = 0; i < nargs; i++)
			sbuf_printf(sb, "%s%p", i == 0 ? "" : ", ", uargs[i]);
		sbuf_putc(sb, ')');
	}
	/*
	 * Final stage: the arguments were proper, the string was
	 * successfully copied from userspace, and copying pointers
	 * from user space succeeded.
	 */
	sig = SIGABRT;
out:
	if (sig == SIGKILL) {
		sbuf_trim(sb);
		sbuf_cat(sb, " (Reason text inaccessible)");
	}
	sbuf_cat(sb, "\n");
	sbuf_finish(sb);
	log(LOG_INFO, "%s", sbuf_data(sb));
	sbuf_delete(sb);
	PROC_LOCK(p);
	sigexit(td, sig);
	/* NOTREACHED */
}
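
/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * userland entry point for the code above is abort2(2), declared in
 * <stdlib.h> as taking a reason string, a count and up to 16 pointers.
 * The kernel logs a line of the form "comm(pid N uid M) aborted: why(...)"
 * and terminates the caller with SIGABRT (or SIGKILL on misuse).
 */
#if 0	/* userspace example, not compiled with the kernel */
#include <stdint.h>
#include <stdlib.h>

int
main(void)
{
	void *args[2];

	/* Up to 16 pointers may be logged alongside the reason string. */
	args[0] = (void *)(uintptr_t)0xdeadc0de;
	args[1] = (void *)main;

	/* Never returns; the kernel logs the message and delivers SIGABRT. */
	abort2("illustrative failure", 2, args);
}
#endif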

#ifdef COMPAT_43
/*
 * The dirty work is handled by kern_wait().
 */
int
owait(struct thread *td, struct owait_args *uap __unused)
{
	int error, status;

	error = kern_wait(td, WAIT_ANY, &status, 0, NULL);
	if (error == 0)
		td->td_retval[1] = status;
	return (error);
}
#endif /* COMPAT_43 */

/*
 * The dirty work is handled by kern_wait().
 */
int
sys_wait4(struct thread *td, struct wait4_args *uap)
{
	struct rusage ru, *rup;
	int error, status;

	if (uap->rusage != NULL)
		rup = &ru;
	else
		rup = NULL;
	error = kern_wait(td, uap->pid, &status, uap->options, rup);
	if (uap->status != NULL && error == 0 && td->td_retval[0] != 0)
		error = copyout(&status, uap->status, sizeof(status));
	if (uap->rusage != NULL && error == 0 && td->td_retval[0] != 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}
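
/*
 * Illustrative userspace sketch (not part of the kernel build): the status
 * word returned by wait4(2) packs p_xexit/p_xsig (KW_EXITCODE() in
 * proc_reap() below) and is decoded with the W* macros; the rusage comes
 * from the statistics folded together by ruadd() at reap time.
 */
#if 0	/* userspace example, not compiled with the kernel */
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/wait.h>

#include <err.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct rusage ru;
	pid_t pid;
	int status;

	pid = fork();
	if (pid == -1)
		err(1, "fork");
	if (pid == 0) {
		raise(SIGKILL);		/* terminate abnormally */
		_exit(1);		/* not reached */
	}

	if (wait4(pid, &status, 0, &ru) == -1)
		err(1, "wait4");
	if (WIFSIGNALED(status))
		printf("killed by signal %d%s\n", WTERMSIG(status),
		    WCOREDUMP(status) ? " (core dumped)" : "");
	else if (WIFEXITED(status))
		printf("exited with status %d\n", WEXITSTATUS(status));
	printf("child user time: %jd.%06lds\n",
	    (intmax_t)ru.ru_utime.tv_sec, ru.ru_utime.tv_usec);
	return (0);
}
#endif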

int
sys_wait6(struct thread *td, struct wait6_args *uap)
{
	struct __wrusage wru, *wrup;
	siginfo_t si, *sip;
	idtype_t idtype;
	id_t id;
	int error, status;

	idtype = uap->idtype;
	id = uap->id;

	if (uap->wrusage != NULL)
		wrup = &wru;
	else
		wrup = NULL;

	if (uap->info != NULL) {
		sip = &si;
		bzero(sip, sizeof(*sip));
	} else
		sip = NULL;

	/*
	 * We expect all callers of wait6() to know about WEXITED and
	 * WTRAPPED.
	 */
	error = kern_wait6(td, idtype, id, &status, uap->options, wrup, sip);

	if (uap->status != NULL && error == 0 && td->td_retval[0] != 0)
		error = copyout(&status, uap->status, sizeof(status));
	if (uap->wrusage != NULL && error == 0 && td->td_retval[0] != 0)
		error = copyout(&wru, uap->wrusage, sizeof(wru));
	if (uap->info != NULL && error == 0)
		error = copyout(&si, uap->info, sizeof(si));
	return (error);
}
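
/*
 * Illustrative userspace sketch (not part of the kernel build): unlike
 * wait4(2), wait6(2) does not add WEXITED/WTRAPPED implicitly, and it can
 * return the siginfo filled in by proc_to_reap() below.  With WNOWAIT the
 * status is only polled, which exercises the early-return path at the top
 * of proc_reap().
 */
#if 0	/* userspace example, not compiled with the kernel */
#include <sys/types.h>
#include <sys/wait.h>

#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct __wrusage wru;
	siginfo_t si;
	pid_t pid;
	int status;

	pid = fork();
	if (pid == -1)
		err(1, "fork");
	if (pid == 0)
		_exit(5);

	/*
	 * WEXITED must be passed explicitly here.  WNOWAIT only peeks at
	 * the status: the child remains a zombie afterwards.
	 */
	if (wait6(P_PID, (id_t)pid, &status, WEXITED | WNOWAIT, &wru,
	    &si) == -1)
		err(1, "wait6");
	printf("peeked: si_pid=%d si_code=%d si_status=%d\n",
	    (int)si.si_pid, si.si_code, si.si_status);

	/* Now actually reap it. */
	if (waitpid(pid, &status, 0) == -1)
		err(1, "waitpid");
	printf("reaped: exit status %d\n", WEXITSTATUS(status));
	return (0);
}
#endif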

/*
 * Reap the remains of a zombie process and optionally return status and
 * rusage.  Asserts and will release both the proctree_lock and the process
 * lock as part of its work.
 */
void
proc_reap(struct thread *td, struct proc *p, int *status, int options)
{
	struct proc *q, *t;

	sx_assert(&proctree_lock, SA_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p->p_state == PRS_ZOMBIE, ("proc_reap: !PRS_ZOMBIE"));

	mtx_spin_wait_unlocked(&p->p_slock);

	q = td->td_proc;

	if (status)
		*status = KW_EXITCODE(p->p_xexit, p->p_xsig);
	if (options & WNOWAIT) {
		/*
		 * Only poll, returning the status.  Caller does not wish to
		 * release the proc struct just yet.
		 */
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		return;
	}

	PROC_LOCK(q);
	sigqueue_take(p->p_ksi);
	PROC_UNLOCK(q);

	/*
	 * If we got the child via a ptrace 'attach', we need to give it back
	 * to the old parent.
	 */
	if (p->p_oppid != p->p_pptr->p_pid) {
		PROC_UNLOCK(p);
		t = proc_realparent(p);
		PROC_LOCK(t);
		PROC_LOCK(p);
		CTR2(KTR_PTRACE,
		    "wait: traced child %d moved back to parent %d", p->p_pid,
		    t->p_pid);
		proc_reparent(p, t, false);
		PROC_UNLOCK(p);
		pksignal(t, SIGCHLD, p->p_ksi);
		wakeup(t);
		cv_broadcast(&p->p_pwait);
		PROC_UNLOCK(t);
		sx_xunlock(&proctree_lock);
		return;
	}
	PROC_UNLOCK(p);

	/*
	 * Remove other references to this process to ensure we have an
	 * exclusive reference.
	 */
	sx_xlock(PIDHASHLOCK(p->p_pid));
	LIST_REMOVE(p, p_hash);
	sx_xunlock(PIDHASHLOCK(p->p_pid));
	LIST_REMOVE(p, p_sibling);
	reaper_abandon_children(p, true);
	reaper_clear(p);
	PROC_LOCK(p);
	proc_clear_orphan(p);
	PROC_UNLOCK(p);
	leavepgrp(p);
	if (p->p_procdesc != NULL)
		procdesc_reap(p);
	sx_xunlock(&proctree_lock);

	proc_id_clear(PROC_ID_PID, p->p_pid);

	PROC_LOCK(p);
	knlist_detach(p->p_klist);
	p->p_klist = NULL;
	PROC_UNLOCK(p);

	/*
	 * Removal from allproc list and process group list paired with
	 * PROC_LOCK which was executed during that time should guarantee
	 * nothing can reach this process anymore. As such further locking
	 * is unnecessary.
	 */
	p->p_xexit = p->p_xsig = 0;		/* XXX: why? */

	PROC_LOCK(q);
	ruadd(&q->p_stats->p_cru, &q->p_crux, &p->p_ru, &p->p_rux);
	PROC_UNLOCK(q);

	/*
	 * Destroy resource accounting information associated with the process.
	 */
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		racct_sub(p, RACCT_NPROC, 1);
		PROC_UNLOCK(p);
	}
#endif
	racct_proc_exit(p);

	/*
	 * Free credentials, arguments, and sigacts, and decrement the count of
	 * processes running with this uid.
	 */
	proc_unset_cred(p, true);
	pargs_drop(p->p_args);
	p->p_args = NULL;
	sigacts_free(p->p_sigacts);
	p->p_sigacts = NULL;

	/*
	 * Do any thread-system specific cleanups.
	 */
	thread_wait(p);

	/*
	 * Give vm and machine-dependent layer a chance to free anything that
	 * cpu_exit couldn't release while still running in process context.
	 */
	vm_waitproc(p);
#ifdef MAC
	mac_proc_destroy(p);
#endif

	KASSERT(FIRST_THREAD_IN_PROC(p),
	    ("proc_reap: no residual thread!"));
	uma_zfree(proc_zone, p);
	atomic_add_int(&nprocs, -1);
}

static int
proc_to_reap(struct thread *td, struct proc *p, idtype_t idtype, id_t id,
    int *status, int options, struct __wrusage *wrusage, siginfo_t *siginfo,
    int check_only)
{
	struct rusage *rup;

	sx_assert(&proctree_lock, SA_XLOCKED);

	PROC_LOCK(p);

	switch (idtype) {
	case P_ALL:
		if (p->p_procdesc == NULL ||
		    (p->p_pptr == td->td_proc &&
		    (p->p_flag & P_TRACED) != 0)) {
			break;
		}

		PROC_UNLOCK(p);
		return (0);
	case P_PID:
		if (p->p_pid != (pid_t)id) {
			PROC_UNLOCK(p);
			return (0);
		}
		break;
	case P_PGID:
		if (p->p_pgid != (pid_t)id) {
			PROC_UNLOCK(p);
			return (0);
		}
		break;
	case P_SID:
		if (p->p_session->s_sid != (pid_t)id) {
			PROC_UNLOCK(p);
			return (0);
		}
		break;
	case P_UID:
		if (p->p_ucred->cr_uid != (uid_t)id) {
			PROC_UNLOCK(p);
			return (0);
		}
		break;
	case P_GID:
		if (p->p_ucred->cr_gid != (gid_t)id) {
			PROC_UNLOCK(p);
			return (0);
		}
		break;
	case P_JAILID:
		if (p->p_ucred->cr_prison->pr_id != (int)id) {
			PROC_UNLOCK(p);
			return (0);
		}
		break;
	/*
	 * It seems that the thread structures get zeroed out
	 * at process exit.  This makes it impossible to
	 * support P_SETID, P_CID or P_CPUID.
	 */
	default:
		PROC_UNLOCK(p);
		return (0);
	}

	if (p_canwait(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}

	if (((options & WEXITED) == 0) && (p->p_state == PRS_ZOMBIE)) {
		PROC_UNLOCK(p);
		return (0);
	}

	/*
	 * This special case handles a kthread spawned by linux_clone
	 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
	 * functions need to be able to distinguish between waiting
	 * on a process and waiting on a thread.  It is a thread if
	 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
	 * signifies we want to wait for threads and not processes.
	 */
	if ((p->p_sigparent != SIGCHLD) ^
	    ((options & WLINUXCLONE) != 0)) {
		PROC_UNLOCK(p);
		return (0);
	}

	if (siginfo != NULL) {
		bzero(siginfo, sizeof(*siginfo));
		siginfo->si_errno = 0;

		/*
		 * SUSv4 requires that the si_signo value is always
		 * SIGCHLD.  Obey it even though the rfork(2) interface
		 * allows requesting a different signal for child exit
		 * notification.
		 */
		siginfo->si_signo = SIGCHLD;

		/*
		 * This is still a rough estimate.  We will fix the
		 * cases TRAPPED, STOPPED, and CONTINUED later.
		 */
		if (WCOREDUMP(p->p_xsig)) {
			siginfo->si_code = CLD_DUMPED;
			siginfo->si_status = WTERMSIG(p->p_xsig);
		} else if (WIFSIGNALED(p->p_xsig)) {
			siginfo->si_code = CLD_KILLED;
			siginfo->si_status = WTERMSIG(p->p_xsig);
		} else {
			siginfo->si_code = CLD_EXITED;
			siginfo->si_status = p->p_xexit;
		}

		siginfo->si_pid = p->p_pid;
		siginfo->si_uid = p->p_ucred->cr_uid;

		/*
		 * The si_addr field would be useful additional
		 * detail, but apparently the PC value may be lost
		 * when we reach this point.  bzero() above sets
		 * siginfo->si_addr to NULL.
		 */
	}

	/*
	 * There should be no reason to limit resource usage info to
	 * exited processes only.  A snapshot of the resources used
	 * by a stopped process may be exactly what is needed.
	 */
	if (wrusage != NULL) {
		rup = &wrusage->wru_self;
		*rup = p->p_ru;
		PROC_STATLOCK(p);
		calcru(p, &rup->ru_utime, &rup->ru_stime);
		PROC_STATUNLOCK(p);

		rup = &wrusage->wru_children;
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
	}

	if (p->p_state == PRS_ZOMBIE && !check_only) {
		proc_reap(td, p, status, options);
		return (-1);
	}
	return (1);
}

int
kern_wait(struct thread *td, pid_t pid, int *status, int options,
    struct rusage *rusage)
{
	struct __wrusage wru, *wrup;
	idtype_t idtype;
	id_t id;
	int ret;

	/*
	 * Translate the special pid values into the (idtype, pid)
	 * pair for kern_wait6.  The WAIT_MYPGRP case is handled by
	 * kern_wait6() on its own.
	 */
	if (pid == WAIT_ANY) {
		idtype = P_ALL;
		id = 0;
	} else if (pid < 0) {
		idtype = P_PGID;
		id = (id_t)-pid;
	} else {
		idtype = P_PID;
		id = (id_t)pid;
	}

	if (rusage != NULL)
		wrup = &wru;
	else
		wrup = NULL;

	/*
	 * For backward compatibility we implicitly add flags WEXITED
	 * and WTRAPPED here.
	 */
	options |= WEXITED | WTRAPPED;
	ret = kern_wait6(td, idtype, id, status, options, wrup, NULL);
	if (rusage != NULL)
		*rusage = wru.wru_self;
	return (ret);
}

static void
report_alive_proc(struct thread *td, struct proc *p, siginfo_t *siginfo,
    int *status, int options, int si_code)
{
	bool cont;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	sx_assert(&proctree_lock, SA_XLOCKED);
	MPASS(si_code == CLD_TRAPPED || si_code == CLD_STOPPED ||
	    si_code == CLD_CONTINUED);

	cont = si_code == CLD_CONTINUED;
	if ((options & WNOWAIT) == 0) {
		if (cont)
			p->p_flag &= ~P_CONTINUED;
		else
			p->p_flag |= P_WAITED;
		if (kern_wait_dequeue_sigchld &&
		    (td->td_proc->p_sysent->sv_flags & SV_SIG_WAITNDQ) == 0) {
			PROC_LOCK(td->td_proc);
			sigqueue_take(p->p_ksi);
			PROC_UNLOCK(td->td_proc);
		}
	}
	sx_xunlock(&proctree_lock);
	if (siginfo != NULL) {
		siginfo->si_code = si_code;
		siginfo->si_status = cont ? SIGCONT : p->p_xsig;
	}
	if (status != NULL)
		*status = cont ? SIGCONT : W_STOPCODE(p->p_xsig);
	td->td_retval[0] = p->p_pid;
	PROC_UNLOCK(p);
}

int
kern_wait6(struct thread *td, idtype_t idtype, id_t id, int *status,
    int options, struct __wrusage *wrusage, siginfo_t *siginfo)
{
	struct proc *p, *q;
	pid_t pid;
	int error, nfound, ret;
	bool report;

	AUDIT_ARG_VALUE((int)idtype);	/* XXX - This is likely wrong! */
	AUDIT_ARG_PID((pid_t)id);	/* XXX - This may be wrong! */
	AUDIT_ARG_VALUE(options);

	q = td->td_proc;

	if ((pid_t)id == WAIT_MYPGRP && (idtype == P_PID || idtype == P_PGID)) {
		PROC_LOCK(q);
		id = (id_t)q->p_pgid;
		PROC_UNLOCK(q);
		idtype = P_PGID;
	}

	/* If we don't know the option, just return. */
	if ((options & ~(WUNTRACED | WNOHANG | WCONTINUED | WNOWAIT |
	    WEXITED | WTRAPPED | WLINUXCLONE)) != 0)
		return (EINVAL);
	if ((options & (WEXITED | WUNTRACED | WCONTINUED | WTRAPPED)) == 0) {
		/*
		 * We will be unable to find any matching processes,
		 * because there are no known events to look for.
		 * Prefer to return an error instead of blocking
		 * indefinitely.
		 */
		return (EINVAL);
	}

loop:
	if (q->p_flag & P_STATCHILD) {
		PROC_LOCK(q);
		q->p_flag &= ~P_STATCHILD;
		PROC_UNLOCK(q);
	}
	sx_xlock(&proctree_lock);
loop_locked:
	nfound = 0;
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		pid = p->p_pid;
		ret = proc_to_reap(td, p, idtype, id, status, options,
		    wrusage, siginfo, 0);
		if (ret == 0)
			continue;
		else if (ret != 1) {
			td->td_retval[0] = pid;
			return (0);
		}

		/*
		 * When running in capsicum(4) mode, make wait(2) ignore
		 * processes created with pdfork(2).  This is because one can
		 * disown them - by passing their process descriptor to another
		 * process - which means it needs to be prevented from touching
		 * them afterwards.
		 */
		if (IN_CAPABILITY_MODE(td) && p->p_procdesc != NULL) {
			PROC_UNLOCK(p);
			continue;
		}

		nfound++;
		PROC_LOCK_ASSERT(p, MA_OWNED);

		if ((options & WTRAPPED) != 0 &&
		    (p->p_flag & P_TRACED) != 0) {
			PROC_SLOCK(p);
			report =
			    ((p->p_flag & (P_STOPPED_TRACE | P_STOPPED_SIG)) &&
			    p->p_suspcount == p->p_numthreads &&
			    (p->p_flag & P_WAITED) == 0);
			PROC_SUNLOCK(p);
			if (report) {
				CTR4(KTR_PTRACE,
				    "wait: returning trapped pid %d status %#x "
				    "(xstat %d) xthread %d",
				    p->p_pid, W_STOPCODE(p->p_xsig), p->p_xsig,
				    p->p_xthread != NULL ?
				    p->p_xthread->td_tid : -1);
				report_alive_proc(td, p, siginfo, status,
				    options, CLD_TRAPPED);
				return (0);
			}
		}
		if ((options & WUNTRACED) != 0 &&
		    (p->p_flag & P_STOPPED_SIG) != 0) {
			PROC_SLOCK(p);
			report = (p->p_suspcount == p->p_numthreads &&
			    ((p->p_flag & P_WAITED) == 0));
			PROC_SUNLOCK(p);
			if (report) {
				report_alive_proc(td, p, siginfo, status,
				    options, CLD_STOPPED);
				return (0);
			}
		}
		if ((options & WCONTINUED) != 0 &&
		    (p->p_flag & P_CONTINUED) != 0) {
			report_alive_proc(td, p, siginfo, status, options,
			    CLD_CONTINUED);
			return (0);
		}
		PROC_UNLOCK(p);
	}

	/*
	 * Look in the orphans list too, to allow the parent to
	 * collect its child's exit status even if the child is being
	 * debugged.
	 *
	 * The debugger detaches from the parent upon a successful
	 * switch-over from parent to child.  At this point, due to
	 * re-parenting, the parent loses the child to the debugger and
	 * a wait4(2) call would report that it has no children to wait
	 * for.  By maintaining a list of orphans we allow the parent
	 * to successfully wait until the child becomes a zombie.
	 */
	if (nfound == 0) {
		LIST_FOREACH(p, &q->p_orphans, p_orphan) {
			ret = proc_to_reap(td, p, idtype, id, NULL, options,
			    NULL, NULL, 1);
			if (ret != 0) {
				KASSERT(ret != -1, ("reaped an orphan (pid %d)",
				    (int)td->td_retval[0]));
				PROC_UNLOCK(p);
				nfound++;
				break;
			}
		}
	}
	if (nfound == 0) {
		sx_xunlock(&proctree_lock);
		return (ECHILD);
	}
	if (options & WNOHANG) {
		sx_xunlock(&proctree_lock);
		td->td_retval[0] = 0;
		return (0);
	}
	PROC_LOCK(q);
	if (q->p_flag & P_STATCHILD) {
		q->p_flag &= ~P_STATCHILD;
		PROC_UNLOCK(q);
		goto loop_locked;
	}
	sx_xunlock(&proctree_lock);
	error = msleep(q, &q->p_mtx, PWAIT | PCATCH | PDROP, "wait", 0);
	if (error)
		return (error);
	goto loop;
}

void
proc_add_orphan(struct proc *child, struct proc *parent)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	KASSERT((child->p_flag & P_TRACED) != 0,
	    ("proc_add_orphan: not traced"));

	if (LIST_EMPTY(&parent->p_orphans)) {
		child->p_treeflag |= P_TREE_FIRST_ORPHAN;
		LIST_INSERT_HEAD(&parent->p_orphans, child, p_orphan);
	} else {
		LIST_INSERT_AFTER(LIST_FIRST(&parent->p_orphans),
		    child, p_orphan);
	}
	child->p_treeflag |= P_TREE_ORPHANED;
}

/*
 * Make process 'parent' the new parent of process 'child'.
 * Must be called with an exclusive hold of proctree lock.
 */
void
proc_reparent(struct proc *child, struct proc *parent, bool set_oppid)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(child, MA_OWNED);
	if (child->p_pptr == parent)
		return;

	PROC_LOCK(child->p_pptr);
	sigqueue_take(child->p_ksi);
	PROC_UNLOCK(child->p_pptr);
	LIST_REMOVE(child, p_sibling);
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);

	proc_clear_orphan(child);
	if ((child->p_flag & P_TRACED) != 0) {
		proc_add_orphan(child, child->p_pptr);
	}

	child->p_pptr = parent;
	if (set_oppid)
		child->p_oppid = parent->p_pid;
}
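
/*
 * Illustrative userspace sketch (not part of the kernel build): when a
 * parent exits, exit1() reparents its children through proc_reparent()
 * and delivers p_pdeathsig to any child that requested it.  The request
 * side is procctl(2) with PROC_PDEATHSIG_CTL; the constant name and its
 * exact calling convention are recalled from procctl(2) and should be
 * treated as assumptions to verify.
 */
#if 0	/* userspace example, not compiled with the kernel */
#include <sys/types.h>
#include <sys/procctl.h>

#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	pid_t pid;

	pid = fork();
	if (pid == -1)
		err(1, "fork");
	if (pid == 0) {
		sigset_t set;
		int sig = SIGUSR1, got;

		sigemptyset(&set);
		sigaddset(&set, SIGUSR1);
		sigprocmask(SIG_BLOCK, &set, NULL);
		/* Ask for SIGUSR1 when our parent exits (p_pdeathsig). */
		if (procctl(P_PID, getpid(), PROC_PDEATHSIG_CTL, &sig) == -1)
			err(1, "PROC_PDEATHSIG_CTL");
		if (sigwait(&set, &got) != 0)
			errx(1, "sigwait");
		printf("child %d: parent exited, got signal %d\n",
		    (int)getpid(), got);
		_exit(0);
	}
	sleep(1);	/* parent exits; exit1() delivers p_pdeathsig */
	return (0);
}
#endif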