Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/openlaunchd
Path: blob/master/launchd/core.c
374 views
1
/*
2
* @APPLE_APACHE_LICENSE_HEADER_START@
3
*
4
* Licensed under the Apache License, Version 2.0 (the "License");
5
* you may not use this file except in compliance with the License.
6
* You may obtain a copy of the License at
7
*
8
* http://www.apache.org/licenses/LICENSE-2.0
9
*
10
* Unless required by applicable law or agreed to in writing, software
11
* distributed under the License is distributed on an "AS IS" BASIS,
12
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
* See the License for the specific language governing permissions and
14
* limitations under the License.
15
*
16
* @APPLE_APACHE_LICENSE_HEADER_END@
17
*/
18
19
#include "config.h"
20
#include "core.h"
21
#include "internal.h"
22
#include "helper.h"
23
24
#include <TargetConditionals.h>
25
#if HAS_MACH
26
#include <mach/mach.h>
27
#include <mach/mach_error.h>
28
#include <mach/boolean.h>
29
#include <mach/message.h>
30
#include <mach/notify.h>
31
#include <mach/mig_errors.h>
32
#include <mach/mach_traps.h>
33
#include <mach/mach_interface.h>
34
#include <mach/host_info.h>
35
#include <mach/mach_host.h>
36
#include <mach/exception.h>
37
#include <mach/host_reboot.h>
38
#endif
39
#include <sys/types.h>
40
#include <sys/queue.h>
41
#include <sys/event.h>
42
#include <sys/stat.h>
43
#include <sys/ucred.h>
44
#include <sys/fcntl.h>
45
#include <sys/un.h>
46
#include <sys/reboot.h>
47
#include <sys/wait.h>
48
#include <sys/sysctl.h>
49
#include <sys/sockio.h>
50
#include <sys/time.h>
51
#include <sys/resource.h>
52
#include <sys/ioctl.h>
53
#include <sys/mount.h>
54
#include <sys/pipe.h>
55
#include <sys/mman.h>
56
#include <sys/socket.h>
57
#include <sys/syscall.h>
58
#include <sys/kern_memorystatus.h>
59
#include <net/if.h>
60
#include <netinet/in.h>
61
#include <netinet/in_var.h>
62
#include <netinet6/nd6.h>
63
#include <bsm/libbsm.h>
64
#include <unistd.h>
65
#include <signal.h>
66
#include <errno.h>
67
#include <libgen.h>
68
#include <stdio.h>
69
#include <stdlib.h>
70
#include <stdarg.h>
71
#include <stdbool.h>
72
#include <paths.h>
73
#include <pwd.h>
74
#include <grp.h>
75
#include <ttyent.h>
76
#include <dlfcn.h>
77
#include <dirent.h>
78
#include <string.h>
79
#include <ctype.h>
80
#include <glob.h>
81
#include <System/sys/spawn.h>
82
#include <System/sys/spawn_internal.h>
83
#include <spawn.h>
84
#include <spawn_private.h>
85
#include <time.h>
86
#include <libinfo.h>
87
#include <os/assumes.h>
88
#include <xpc/launchd.h>
89
#include <asl.h>
90
#include <_simple.h>
91
92
#include <libproc.h>
93
#include <libproc_internal.h>
94
#include <System/sys/proc_info.h>
95
#include <malloc/malloc.h>
96
#include <pthread.h>
97
#if HAVE_SANDBOX
98
#define __APPLE_API_PRIVATE
99
#include <sandbox.h>
100
#endif
101
#if HAVE_QUARANTINE
102
#include <quarantine.h>
103
#endif
104
#if HAVE_RESPONSIBILITY
105
#include <responsibility.h>
106
#endif
107
#if !TARGET_OS_EMBEDDED
108
extern int gL1CacheEnabled;
109
#endif
110
#if HAVE_SYSTEMSTATS
111
#include <systemstats/systemstats.h>
112
#endif
113
114
#include "launch.h"
115
#include "launch_priv.h"
116
#include "launch_internal.h"
117
#include "bootstrap.h"
118
#include "bootstrap_priv.h"
119
#include "vproc.h"
120
#include "vproc_internal.h"
121
122
#include "reboot2.h"
123
124
#include "launchd.h"
125
#include "runtime.h"
126
#include "ipc.h"
127
#include "job.h"
128
#include "jobServer.h"
129
#include "job_reply.h"
130
#include "job_forward.h"
131
#include "mach_excServer.h"
132
133
#define POSIX_SPAWN_IOS_INTERACTIVE 0
134
135
#if TARGET_OS_EMBEDDED
136
/* Default memory highwatermark for daemons as set out in <rdar://problem/10307788>. */
137
#define DEFAULT_JETSAM_DAEMON_HIGHWATERMARK 5
138
#endif
139
140
/* LAUNCHD_DEFAULT_EXIT_TIMEOUT
141
* If the job hasn't exited in the given number of seconds after sending
142
 * it a SIGTERM, SIGKILL it. Can be overridden in the job plist.
143
*/
144
#define LAUNCHD_MIN_JOB_RUN_TIME 10
145
#define LAUNCHD_DEFAULT_EXIT_TIMEOUT 20
146
#define LAUNCHD_SIGKILL_TIMER 4
147
#define LAUNCHD_LOG_FAILED_EXEC_FREQ 10
148
149
#define SHUTDOWN_LOG_DIR "/var/log/shutdown"
150
151
#define TAKE_SUBSET_NAME "TakeSubsetName"
152
#define TAKE_SUBSET_PID "TakeSubsetPID"
153
#define TAKE_SUBSET_PERPID "TakeSubsetPerPID"
154
155
#define IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)
156
157
extern char **environ;
158
159
struct waiting_for_removal {
160
SLIST_ENTRY(waiting_for_removal) sle;
161
mach_port_t reply_port;
162
};
163
164
static bool waiting4removal_new(job_t j, mach_port_t rp);
165
static void waiting4removal_delete(job_t j, struct waiting_for_removal *w4r);
166
167
struct machservice {
168
SLIST_ENTRY(machservice) sle;
169
SLIST_ENTRY(machservice) special_port_sle;
170
LIST_ENTRY(machservice) name_hash_sle;
171
LIST_ENTRY(machservice) port_hash_sle;
172
struct machservice *alias;
173
job_t job;
174
unsigned int gen_num;
175
mach_port_name_t port;
176
unsigned int
177
isActive:1,
178
reset:1,
179
recv:1,
180
hide:1,
181
kUNCServer:1,
182
per_user_hack:1,
183
debug_on_close:1,
184
per_pid:1,
185
delete_on_destruction:1,
186
drain_one_on_crash:1,
187
drain_all_on_crash:1,
188
upfront:1,
189
event_channel:1,
190
recv_race_hack :1,
191
/* Don't let the size of this field to get too small. It has to be large
192
* enough to represent the reasonable range of special port numbers.
193
*/
194
special_port_num:17;
195
const char name[0];
196
};
197
198
// HACK: This should be per jobmgr_t
199
static SLIST_HEAD(, machservice) special_ports;
200
201
#define PORT_HASH_SIZE 32
202
#define HASH_PORT(x) (IS_POWER_OF_TWO(PORT_HASH_SIZE) ? (MACH_PORT_INDEX(x) & (PORT_HASH_SIZE - 1)) : (MACH_PORT_INDEX(x) % PORT_HASH_SIZE))
203
204
static LIST_HEAD(, machservice) port_hash[PORT_HASH_SIZE];
205
206
static void machservice_setup(launch_data_t obj, const char *key, void *context);
207
static void machservice_setup_options(launch_data_t obj, const char *key, void *context);
208
static void machservice_resetport(job_t j, struct machservice *ms);
209
static void machservice_stamp_port(job_t j, struct machservice *ms);
210
static struct machservice *machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local);
211
static struct machservice *machservice_new_alias(job_t aj, struct machservice *orig);
212
static void machservice_ignore(job_t j, struct machservice *ms);
213
static void machservice_watch(job_t j, struct machservice *ms);
214
static void machservice_delete(job_t j, struct machservice *, bool port_died);
215
static void machservice_request_notifications(struct machservice *);
216
static mach_port_t machservice_port(struct machservice *);
217
static job_t machservice_job(struct machservice *);
218
static bool machservice_hidden(struct machservice *);
219
static bool machservice_active(struct machservice *);
220
static const char *machservice_name(struct machservice *);
221
static bootstrap_status_t machservice_status(struct machservice *);
222
void machservice_drain_port(struct machservice *);
223
224
/* A group of file descriptors (sockets) belonging to a job. The anonymous
 * union overlays a const reader view (name) and a writable view (name_init)
 * over the same trailing variable-length storage.
 */
struct socketgroup {
	SLIST_ENTRY(socketgroup) sle;
	int *fds;
	unsigned int fd_cnt;
	union {
		const char name[0];
		char name_init[0];
	};
};
233
234
static bool socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt);
235
static void socketgroup_delete(job_t j, struct socketgroup *sg);
236
static void socketgroup_watch(job_t j, struct socketgroup *sg);
237
static void socketgroup_ignore(job_t j, struct socketgroup *sg);
238
static void socketgroup_callback(job_t j);
239
static void socketgroup_setup(launch_data_t obj, const char *key, void *context);
240
static void socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add);
241
242
struct calendarinterval {
243
LIST_ENTRY(calendarinterval) global_sle;
244
SLIST_ENTRY(calendarinterval) sle;
245
job_t job;
246
struct tm when;
247
time_t when_next;
248
};
249
250
static LIST_HEAD(, calendarinterval) sorted_calendar_events;
251
252
static bool calendarinterval_new(job_t j, struct tm *w);
253
static bool calendarinterval_new_from_obj(job_t j, launch_data_t obj);
254
static void calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context);
255
static void calendarinterval_delete(job_t j, struct calendarinterval *ci);
256
static void calendarinterval_setalarm(job_t j, struct calendarinterval *ci);
257
static void calendarinterval_callback(void);
258
static void calendarinterval_sanity_check(void);
259
260
/* One environment variable (key/value) attached to a job. The anonymous
 * union overlays a const reader view (key) and a writable view (key_init)
 * over the same trailing variable-length storage.
 */
struct envitem {
	SLIST_ENTRY(envitem) sle;
	char *value;
	union {
		const char key[0];
		char key_init[0];
	};
};
268
269
static bool envitem_new(job_t j, const char *k, const char *v, bool global);
270
static void envitem_delete(job_t j, struct envitem *ei, bool global);
271
static void envitem_setup(launch_data_t obj, const char *key, void *context);
272
273
/* One resource limit to apply to a job: `which` is the RLIMIT_* selector,
 * and the setsoft/sethard bits record which of the two limits to set.
 */
struct limititem {
	SLIST_ENTRY(limititem) sle;
	struct rlimit lim;
	unsigned int setsoft:1, sethard:1, which:30;
};
278
279
static bool limititem_update(job_t j, int w, rlim_t r);
280
static void limititem_delete(job_t j, struct limititem *li);
281
static void limititem_setup(launch_data_t obj, const char *key, void *context);
282
#if HAVE_SANDBOX
283
static void seatbelt_setup_flags(launch_data_t obj, const char *key, void *context);
284
#endif
285
286
static void jetsam_property_setup(launch_data_t obj, const char *key, job_t j);
287
288
/* The condition a KeepAlive-style semaphore item tests (why a job should be
 * kept alive or left alone). Values start at 1 so zero is never a valid
 * reason.
 */
typedef enum {
	NETWORK_UP = 1,
	NETWORK_DOWN,
	SUCCESSFUL_EXIT,
	FAILED_EXIT,
	CRASHED,
	DID_NOT_CRASH,
	OTHER_JOB_ENABLED,
	OTHER_JOB_DISABLED,
	OTHER_JOB_ACTIVE,
	OTHER_JOB_INACTIVE,
} semaphore_reason_t;
300
301
struct semaphoreitem {
302
SLIST_ENTRY(semaphoreitem) sle;
303
semaphore_reason_t why;
304
305
union {
306
const char what[0];
307
char what_init[0];
308
};
309
};
310
311
struct semaphoreitem_dict_iter_context {
312
job_t j;
313
semaphore_reason_t why_true;
314
semaphore_reason_t why_false;
315
};
316
317
static bool semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what);
318
static void semaphoreitem_delete(job_t j, struct semaphoreitem *si);
319
static void semaphoreitem_setup(launch_data_t obj, const char *key, void *context);
320
static void semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context);
321
static void semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add);
322
323
struct externalevent {
324
LIST_ENTRY(externalevent) sys_le;
325
LIST_ENTRY(externalevent) job_le;
326
struct eventsystem *sys;
327
328
uint64_t id;
329
job_t job;
330
bool state;
331
bool wanted_state;
332
bool internal;
333
xpc_object_t event;
334
xpc_object_t entitlements;
335
336
char name[0];
337
};
338
339
struct externalevent_iter_ctx {
340
job_t j;
341
struct eventsystem *sys;
342
};
343
344
static bool externalevent_new(job_t j, struct eventsystem *sys, const char *evname, xpc_object_t event, uint64_t flags);
345
static void externalevent_delete(struct externalevent *ee);
346
static void externalevent_setup(launch_data_t obj, const char *key, void *context);
347
static struct externalevent *externalevent_find(const char *sysname, uint64_t id);
348
349
/* A named event stream. Owns its list of externalevents; curid is the
 * counter used to mint event IDs. Variable-length via the trailing name[].
 */
struct eventsystem {
	LIST_ENTRY(eventsystem) global_le;
	LIST_HEAD(, externalevent) events;
	uint64_t curid;
	char name[0];
};
355
356
static struct eventsystem *eventsystem_new(const char *name);
357
static void eventsystem_delete(struct eventsystem *sys) __attribute__((unused));
358
static void eventsystem_setup(launch_data_t obj, const char *key, void *context);
359
static struct eventsystem *eventsystem_find(const char *name);
360
static void eventsystem_ping(void);
361
362
struct waiting4attach {
363
LIST_ENTRY(waiting4attach) le;
364
mach_port_t port;
365
pid_t dest;
366
xpc_service_type_t type;
367
char name[0];
368
};
369
370
static LIST_HEAD(, waiting4attach) _launchd_domain_waiters;
371
372
static struct waiting4attach *waiting4attach_new(jobmgr_t jm, const char *name, mach_port_t port, pid_t dest, xpc_service_type_t type);
373
static void waiting4attach_delete(jobmgr_t jm, struct waiting4attach *w4a);
374
static struct waiting4attach *waiting4attach_find(jobmgr_t jm, job_t j);
375
376
#define ACTIVE_JOB_HASH_SIZE 32
377
#define ACTIVE_JOB_HASH(x) (IS_POWER_OF_TWO(ACTIVE_JOB_HASH_SIZE) ? (x & (ACTIVE_JOB_HASH_SIZE - 1)) : (x % ACTIVE_JOB_HASH_SIZE))
378
379
#define MACHSERVICE_HASH_SIZE 37
380
381
#define LABEL_HASH_SIZE 53
382
struct jobmgr_s {
383
kq_callback kqjobmgr_callback;
384
LIST_ENTRY(jobmgr_s) xpc_le;
385
SLIST_ENTRY(jobmgr_s) sle;
386
SLIST_HEAD(, jobmgr_s) submgrs;
387
LIST_HEAD(, job_s) jobs;
388
LIST_HEAD(, waiting4attach) attaches;
389
390
/* For legacy reasons, we keep all job labels that are imported in the root
391
* job manager's label hash. If a job manager is an XPC domain, then it gets
392
* its own label hash that is separate from the "global" one stored in the
393
* root job manager.
394
*/
395
LIST_HEAD(, job_s) label_hash[LABEL_HASH_SIZE];
396
LIST_HEAD(, job_s) active_jobs[ACTIVE_JOB_HASH_SIZE];
397
LIST_HEAD(, machservice) ms_hash[MACHSERVICE_HASH_SIZE];
398
LIST_HEAD(, job_s) global_env_jobs;
399
mach_port_t jm_port;
400
mach_port_t req_port;
401
jobmgr_t parentmgr;
402
int reboot_flags;
403
time_t shutdown_time;
404
unsigned int global_on_demand_cnt;
405
unsigned int normal_active_cnt;
406
unsigned int
407
shutting_down:1,
408
session_initialized:1,
409
killed_stray_jobs:1,
410
monitor_shutdown:1,
411
shutdown_jobs_dirtied:1,
412
shutdown_jobs_cleaned:1,
413
xpc_singleton:1;
414
uint32_t properties;
415
// XPC-specific properties.
416
char owner[MAXCOMLEN];
417
char *shortdesc;
418
mach_port_t req_bsport;
419
mach_port_t req_excport;
420
mach_port_t req_asport;
421
mach_port_t req_gui_asport;
422
pid_t req_pid;
423
uid_t req_euid;
424
gid_t req_egid;
425
au_asid_t req_asid;
426
vm_offset_t req_ctx;
427
mach_msg_type_number_t req_ctx_sz;
428
mach_port_t req_rport;
429
uint64_t req_uniqueid;
430
kern_return_t error;
431
union {
432
const char name[0];
433
char name_init[0];
434
};
435
};
436
437
// Global XPC domains.
438
static jobmgr_t _s_xpc_system_domain;
439
static LIST_HEAD(, jobmgr_s) _s_xpc_user_domains;
440
static LIST_HEAD(, jobmgr_s) _s_xpc_session_domains;
441
442
#define jobmgr_assumes(jm, e) os_assumes_ctx(jobmgr_log_bug, jm, (e))
443
#define jobmgr_assumes_zero(jm, e) os_assumes_zero_ctx(jobmgr_log_bug, jm, (e))
444
#define jobmgr_assumes_zero_p(jm, e) posix_assumes_zero_ctx(jobmgr_log_bug, jm, (e))
445
446
static jobmgr_t jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool no_init, mach_port_t asport);
447
static jobmgr_t jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name);
448
static jobmgr_t jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid);
449
static jobmgr_t jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid);
450
static job_t jobmgr_import2(jobmgr_t jm, launch_data_t pload);
451
static jobmgr_t jobmgr_parent(jobmgr_t jm);
452
static jobmgr_t jobmgr_do_garbage_collection(jobmgr_t jm);
453
static bool jobmgr_label_test(jobmgr_t jm, const char *str);
454
static void jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev);
455
static void jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays);
456
static void jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np);
457
static void jobmgr_remove(jobmgr_t jm);
458
static void jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack);
459
static job_t jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag);
460
static job_t jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay);
461
static job_t jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon);
462
static job_t managed_job(pid_t p);
463
static jobmgr_t jobmgr_find_by_name(jobmgr_t jm, const char *where);
464
static job_t job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid);
465
static job_t jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp);
466
static void job_export_all2(jobmgr_t jm, launch_data_t where);
467
static void jobmgr_callback(void *obj, struct kevent *kev);
468
static void jobmgr_setup_env_from_other_jobs(jobmgr_t jm);
469
static void jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict);
470
static struct machservice *jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid);
471
static void jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
472
static void jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
473
static void jobmgr_log_perf_statistics(jobmgr_t jm, bool signal_children);
474
// static void jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
475
static bool jobmgr_log_bug(_SIMPLE_STRING asl_message, void *ctx, const char *message);
476
477
#define AUTO_PICK_LEGACY_LABEL (const char *)(~0)
478
#define AUTO_PICK_ANONYMOUS_LABEL (const char *)(~1)
479
#define AUTO_PICK_XPC_LABEL (const char *)(~2)
480
481
struct suspended_peruser {
482
LIST_ENTRY(suspended_peruser) sle;
483
job_t j;
484
};
485
486
struct job_s {
487
// MUST be first element of this structure.
488
kq_callback kqjob_callback;
489
LIST_ENTRY(job_s) sle;
490
LIST_ENTRY(job_s) subjob_sle;
491
LIST_ENTRY(job_s) needing_session_sle;
492
LIST_ENTRY(job_s) jetsam_sle;
493
LIST_ENTRY(job_s) pid_hash_sle;
494
LIST_ENTRY(job_s) global_pid_hash_sle;
495
LIST_ENTRY(job_s) label_hash_sle;
496
LIST_ENTRY(job_s) global_env_sle;
497
SLIST_ENTRY(job_s) curious_jobs_sle;
498
LIST_HEAD(, suspended_peruser) suspended_perusers;
499
LIST_HEAD(, waiting_for_exit) exit_watchers;
500
LIST_HEAD(, job_s) subjobs;
501
LIST_HEAD(, externalevent) events;
502
SLIST_HEAD(, socketgroup) sockets;
503
SLIST_HEAD(, calendarinterval) cal_intervals;
504
SLIST_HEAD(, envitem) global_env;
505
SLIST_HEAD(, envitem) env;
506
SLIST_HEAD(, limititem) limits;
507
SLIST_HEAD(, machservice) machservices;
508
SLIST_HEAD(, semaphoreitem) semaphores;
509
SLIST_HEAD(, waiting_for_removal) removal_watchers;
510
struct waiting4attach *w4a;
511
job_t original;
512
job_t alias;
513
cpu_type_t *j_binpref;
514
size_t j_binpref_cnt;
515
mach_port_t j_port;
516
mach_port_t exit_status_dest;
517
mach_port_t exit_status_port;
518
mach_port_t spawn_reply_port;
519
uid_t mach_uid;
520
jobmgr_t mgr;
521
size_t argc;
522
char **argv;
523
char *prog;
524
char *rootdir;
525
char *workingdir;
526
char *username;
527
char *groupname;
528
char *stdinpath;
529
char *stdoutpath;
530
char *stderrpath;
531
char *alt_exc_handler;
532
char *cfbundleidentifier;
533
unsigned int nruns;
534
uint64_t trt;
535
#if HAVE_SANDBOX
536
char *seatbelt_profile;
537
uint64_t seatbelt_flags;
538
char *container_identifier;
539
#endif
540
#if HAVE_QUARANTINE
541
void *quarantine_data;
542
size_t quarantine_data_sz;
543
#endif
544
pid_t p;
545
uint64_t uniqueid;
546
int last_exit_status;
547
int stdin_fd;
548
int fork_fd;
549
int nice;
550
uint32_t pstype;
551
uint32_t psproctype;
552
int32_t jetsam_priority;
553
int32_t jetsam_memlimit;
554
int32_t main_thread_priority;
555
uint32_t timeout;
556
uint32_t exit_timeout;
557
uint64_t sent_signal_time;
558
uint64_t start_time;
559
uint32_t min_run_time;
560
bool unthrottle;
561
uint32_t start_interval;
562
uint32_t peruser_suspend_count;
563
uuid_t instance_id;
564
mode_t mask;
565
mach_port_t asport;
566
au_asid_t asid;
567
uuid_t expected_audit_uuid;
568
bool
569
// man launchd.plist --> Debug
570
debug:1,
571
// man launchd.plist --> KeepAlive == false
572
ondemand:1,
573
// man launchd.plist --> SessionCreate
574
session_create:1,
575
// man launchd.plist --> LowPriorityIO
576
low_pri_io:1,
577
// man launchd.plist --> InitGroups
578
no_init_groups:1,
579
/* A legacy mach_init concept to make bootstrap_create_server/service()
580
* work
581
*/
582
priv_port_has_senders:1,
583
// A hack during job importing
584
importing_global_env:1,
585
// A hack during job importing
586
importing_hard_limits:1,
587
// man launchd.plist --> Umask
588
setmask:1,
589
// A process that launchd knows about but doesn't manage.
590
anonymous:1,
591
// A legacy mach_init concept to detect sick jobs
592
checkedin:1,
593
// A job created via bootstrap_create_server()
594
legacy_mach_job:1,
595
// A job created via spawn_via_launchd()
596
legacy_LS_job:1,
597
// A legacy job that wants inetd compatible semantics
598
inetcompat:1,
599
// A twist on inetd compatibility
600
inetcompat_wait:1,
601
/* An event fired and the job should start, but not necessarily right
602
* away.
603
*/
604
start_pending:1,
605
// man launchd.plist --> EnableGlobbing
606
globargv:1,
607
// man launchd.plist --> WaitForDebugger
608
wait4debugger:1,
609
// One-shot WaitForDebugger.
610
wait4debugger_oneshot:1,
611
// MachExceptionHandler == true
612
internal_exc_handler:1,
613
// A hack to support an option of spawn_via_launchd()
614
stall_before_exec:1,
615
/* man launchd.plist --> LaunchOnlyOnce.
616
*
617
* Note: <rdar://problem/5465184> Rename this to "HopefullyNeverExits".
618
*/
619
only_once:1,
620
/* Make job_ignore() / job_watch() work. If these calls were balanced,
621
* then this wouldn't be necessarily.
622
*/
623
currently_ignored:1,
624
/* A job that forced all other jobs to be temporarily launch-on-
625
* demand
626
*/
627
forced_peers_to_demand_mode:1,
628
// man launchd.plist --> Nice
629
setnice:1,
630
/* A job was asked to be unloaded/removed while running, we'll remove it
631
* after it exits.
632
*/
633
removal_pending:1,
634
// job_kill() was called.
635
sent_sigkill:1,
636
// Enter the kernel debugger before killing a job.
637
debug_before_kill:1,
638
// A hack that launchd+launchctl use during jobmgr_t creation.
639
weird_bootstrap:1,
640
// man launchd.plist --> StartOnMount
641
start_on_mount:1,
642
// This job is a per-user launchd managed by the PID 1 launchd.
643
per_user:1,
644
// A job thoroughly confused launchd. We need to unload it ASAP.
645
unload_at_mig_return:1,
646
// man launchd.plist --> AbandonProcessGroup
647
abandon_pg:1,
648
/* During shutdown, do not send SIGTERM to stray processes in the
649
* process group of this job.
650
*/
651
ignore_pg_at_shutdown:1,
652
/* Don't let this job create new 'job_t' objects in launchd. Has been
653
* seriously overloaded for the purposes of sandboxing.
654
*/
655
deny_job_creation:1,
656
// man launchd.plist --> EnableTransactions
657
enable_transactions:1,
658
// The job was sent SIGKILL because it was clean.
659
clean_kill:1,
660
// The job has an OtherJobEnabled KeepAlive criterion.
661
nosy:1,
662
// The job exited due to a crash.
663
crashed:1,
664
// We've received NOTE_EXIT for the job and reaped it.
665
reaped:1,
666
// job_stop() was called.
667
stopped:1,
668
/* The job is to be kept alive continuously, but it must first get an
669
* initial kick off.
670
*/
671
needs_kickoff:1,
672
// The job is a bootstrapper.
673
is_bootstrapper:1,
674
// The job owns the console.
675
has_console:1,
676
/* The job runs as a non-root user on embedded but has select privileges
677
* of the root user. This is SpringBoard.
678
*/
679
embedded_god:1,
680
// The job is responsible for drawing the home screen on embedded.
681
embedded_home:1,
682
// We got NOTE_EXEC for the job.
683
did_exec:1,
684
// The job is an XPC service, and XPC proxy successfully exec(3)ed.
685
xpcproxy_did_exec:1,
686
// The (anonymous) job called vprocmgr_switch_to_session().
687
holds_ref:1,
688
// The job has Jetsam limits in place.
689
jetsam_properties:1,
690
// The job's Jetsam memory limits should only be applied in the background
691
jetsam_memory_limit_background:1,
692
/* This job was created as the result of a look up of a service provided
693
* by a MultipleInstance job.
694
*/
695
dedicated_instance:1,
696
// The job supports creating additional instances of itself.
697
multiple_instances:1,
698
/* The sub-job was already removed from the parent's list of
699
* sub-jobs.
700
*/
701
former_subjob:1,
702
/* The job is responsible for monitoring external events for this
703
* launchd.
704
*/
705
event_monitor:1,
706
// The event monitor job has retrieved the initial list of events.
707
event_monitor_ready2signal:1,
708
// A lame hack.
709
removing:1,
710
// Disable ASLR when launching this job.
711
disable_aslr:1,
712
// The job is an XPC Service.
713
xpc_service:1,
714
// The job is the Performance team's shutdown monitor.
715
shutdown_monitor:1,
716
// We should open a transaction for the job when shutdown begins.
717
dirty_at_shutdown:1,
718
/* The job was sent SIGKILL but did not exit in a timely fashion,
719
* indicating a kernel bug.
720
*/
721
workaround9359725:1,
722
// The job is the XPC domain bootstrapper.
723
xpc_bootstrapper:1,
724
// The job is an app (on either iOS or OS X) and has different resource
725
// limitations.
726
app:1,
727
// FairPlay decryption failed on the job. This should only ever happen
728
// to apps.
729
fpfail:1,
730
// The job failed to exec(3) for reasons that may be transient, so we're
731
// waiting for UserEventAgent to tell us when it's okay to try spawning
732
// again (i.e. when the executable path appears, when the UID appears,
733
// etc.).
734
waiting4ok:1,
735
// The job exited due to memory pressure.
736
jettisoned:1,
737
// The job supports idle-exit.
738
idle_exit:1,
739
// The job was implicitly reaped by the kernel.
740
implicit_reap:1,
741
system_app :1,
742
joins_gui_session :1,
743
low_priority_background_io :1,
744
legacy_timers :1;
745
746
const char label[0];
747
};
748
749
static size_t hash_label(const char *label) __attribute__((pure));
750
static size_t hash_ms(const char *msstr) __attribute__((pure));
751
static SLIST_HEAD(, job_s) s_curious_jobs;
752
static LIST_HEAD(, job_s) managed_actives[ACTIVE_JOB_HASH_SIZE];
753
754
#define job_assumes(j, e) os_assumes_ctx(job_log_bug, j, (e))
755
#define job_assumes_zero(j, e) os_assumes_zero_ctx(job_log_bug, j, (e))
756
#define job_assumes_zero_p(j, e) posix_assumes_zero_ctx(job_log_bug, j, (e))
757
758
static void job_import_keys(launch_data_t obj, const char *key, void *context);
759
static void job_import_bool(job_t j, const char *key, bool value);
760
static void job_import_string(job_t j, const char *key, const char *value);
761
static void job_import_integer(job_t j, const char *key, long long value);
762
static void job_import_dictionary(job_t j, const char *key, launch_data_t value);
763
static void job_import_array(job_t j, const char *key, launch_data_t value);
764
static void job_import_opaque(job_t j, const char *key, launch_data_t value);
765
static bool job_set_global_on_demand(job_t j, bool val);
766
static const char *job_active(job_t j);
767
static void job_watch(job_t j);
768
static void job_ignore(job_t j);
769
static void job_reap(job_t j);
770
static bool job_useless(job_t j);
771
static bool job_keepalive(job_t j);
772
static void job_dispatch_curious_jobs(job_t j);
773
static void job_start(job_t j);
774
static void job_start_child(job_t j) __attribute__((noreturn));
775
static void job_setup_attributes(job_t j);
776
static bool job_setup_machport(job_t j);
777
static kern_return_t job_setup_exit_port(job_t j);
778
static void job_setup_fd(job_t j, int target_fd, const char *path, int flags);
779
static void job_postfork_become_user(job_t j);
780
static void job_postfork_test_user(job_t j);
781
static void job_log_pids_with_weird_uids(job_t j);
782
static void job_setup_exception_port(job_t j, task_t target_task);
783
static void job_callback(void *obj, struct kevent *kev);
784
static void job_callback_proc(job_t j, struct kevent *kev);
785
static void job_callback_timer(job_t j, void *ident);
786
static void job_callback_read(job_t j, int ident);
787
static void job_log_stray_pg(job_t j);
788
static void job_log_children_without_exec(job_t j);
789
static job_t job_new_anonymous(jobmgr_t jm, pid_t anonpid) __attribute__((malloc, nonnull, warn_unused_result));
790
static job_t job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv) __attribute__((malloc, nonnull(1,2), warn_unused_result));
791
static job_t job_new_alias(jobmgr_t jm, job_t src);
792
static job_t job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond) __attribute__((malloc, nonnull, warn_unused_result));
793
static job_t job_new_subjob(job_t j, uuid_t identifier);
794
static void job_kill(job_t j);
795
static void job_uncork_fork(job_t j);
796
static void job_logv(job_t j, int pri, int err, const char *msg, va_list ap) __attribute__((format(printf, 4, 0)));
797
static void job_log_error(job_t j, int pri, const char *msg, ...) __attribute__((format(printf, 3, 4)));
798
static bool job_log_bug(_SIMPLE_STRING asl_message, void *ctx, const char *message);
799
static void job_log_perf_statistics(job_t j, struct rusage_info_v1 *ri, int64_t exit_status);
800
#if HAVE_SYSTEMSTATS
801
static void job_log_systemstats(pid_t pid, uint64_t uniqueid, uint64_t parent_uniqueid, pid_t req_pid, uint64_t req_uniqueid, const char *name, struct rusage_info_v1 *ri, int64_t exit_status);
802
#endif
803
static void job_set_exception_port(job_t j, mach_port_t port);
804
static kern_return_t job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj);
805
static void job_open_shutdown_transaction(job_t ji);
806
static void job_close_shutdown_transaction(job_t ji);
807
static launch_data_t job_do_legacy_ipc_request(job_t j, launch_data_t request, mach_port_t asport);
808
static void job_setup_per_user_directory(job_t j, uid_t uid, const char *path);
809
static void job_setup_per_user_directories(job_t j, uid_t uid, const char *label);
810
// Jetsam (memory-pressure kill) property updaters.
static void job_update_jetsam_properties(job_t j, xpc_jetsam_band_t band, uint64_t user_data);
static void job_update_jetsam_memory_limit(job_t j, int32_t limit);

#if TARGET_OS_EMBEDDED
static bool job_import_defaults(launch_data_t pload);
#endif

// Maps XPC jetsam bands onto the kernel's jetsam priority values.
static struct priority_properties_t {
	long long band;
	int priority;
} _launchd_priority_map[] = {
	{ XPC_JETSAM_BAND_SUSPENDED, JETSAM_PRIORITY_IDLE },
	{ XPC_JETSAM_BAND_BACKGROUND_OPPORTUNISTIC, JETSAM_PRIORITY_BACKGROUND_OPPORTUNISTIC },
	{ XPC_JETSAM_BAND_BACKGROUND, JETSAM_PRIORITY_BACKGROUND },
	{ XPC_JETSAM_BAND_MAIL, JETSAM_PRIORITY_MAIL },
	{ XPC_JETSAM_BAND_PHONE, JETSAM_PRIORITY_PHONE },
	{ XPC_JETSAM_BAND_UI_SUPPORT, JETSAM_PRIORITY_UI_SUPPORT },
	{ XPC_JETSAM_BAND_FOREGROUND_SUPPORT, JETSAM_PRIORITY_FOREGROUND_SUPPORT },
	{ XPC_JETSAM_BAND_FOREGROUND, JETSAM_PRIORITY_FOREGROUND },
	{ XPC_JETSAM_BAND_AUDIO, JETSAM_PRIORITY_AUDIO_AND_ACCESSORY },
	{ XPC_JETSAM_BAND_ACCESSORY, JETSAM_PRIORITY_AUDIO_AND_ACCESSORY },
	{ XPC_JETSAM_BAND_CRITICAL, JETSAM_PRIORITY_CRITICAL },
	{ XPC_JETSAM_BAND_TELEPHONY, JETSAM_PRIORITY_TELEPHONY },
};

// Maps launchd plist ResourceLimits keys onto setrlimit(2) resource ids.
static const struct {
	const char *key;
	int val;
} launchd_keys2limits[] = {
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CORE, RLIMIT_CORE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_CPU, RLIMIT_CPU },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_DATA, RLIMIT_DATA },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_FSIZE, RLIMIT_FSIZE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_MEMLOCK, RLIMIT_MEMLOCK },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NOFILE, RLIMIT_NOFILE },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_NPROC, RLIMIT_NPROC },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_RSS, RLIMIT_RSS },
	{ LAUNCH_JOBKEY_RESOURCELIMIT_STACK, RLIMIT_STACK },
};

// Calendar-interval ("cron emulation") helpers for StartCalendarInterval.
static time_t cronemu(int mon, int mday, int hour, int min);
static time_t cronemu_wday(int wday, int hour, int min);
static bool cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min);
static bool cronemu_mday(struct tm *wtm, int mday, int hour, int min);
static bool cronemu_hour(struct tm *wtm, int hour, int min);
static bool cronemu_min(struct tm *wtm, int min);

// miscellaneous file local functions
static size_t get_kern_max_proc(void);
static char **mach_cmd2argv(const char *string);
static size_t our_strhash(const char *s) __attribute__((pure));

void eliminate_double_reboot(void);

#pragma mark XPC Domain Forward Declarations
static job_t _xpc_domain_import_service(jobmgr_t jm, launch_data_t pload);
static int _xpc_domain_import_services(job_t j, launch_data_t services);

#pragma mark XPC Event Forward Declarations
static int xpc_event_find_channel(job_t j, const char *stream, struct machservice **ms);
static int xpc_event_get_event_name(job_t j, xpc_object_t request, xpc_object_t *reply);
static int xpc_event_set_event(job_t j, xpc_object_t request, xpc_object_t *reply);
static int xpc_event_copy_event(job_t j, xpc_object_t request, xpc_object_t *reply);
static int xpc_event_channel_check_in(job_t j, xpc_object_t request, xpc_object_t *reply);
static int xpc_event_channel_look_up(job_t j, xpc_object_t request, xpc_object_t *reply);
static int xpc_event_provider_check_in(job_t j, xpc_object_t request, xpc_object_t *reply);
static int xpc_event_provider_set_state(job_t j, xpc_object_t request, xpc_object_t *reply);

#pragma mark XPC Process Forward Declarations
static int xpc_process_set_jetsam_band(job_t j, xpc_object_t request, xpc_object_t *reply);
static int xpc_process_set_jetsam_memory_limit(job_t j, xpc_object_t request, xpc_object_t *reply);

// file local globals
static job_t _launchd_embedded_god = NULL;
static job_t _launchd_embedded_home = NULL;
// Counters of managed vs. anonymous (externally created) child processes.
static size_t total_children;
static size_t total_anon_children;
static mach_port_t the_exception_server;
static job_t workaround_5477111;
// Jobs waiting for an audit session to be attached.
static LIST_HEAD(, job_s) s_needing_sessions;
static LIST_HEAD(, eventsystem) _s_event_systems;
static struct eventsystem *_launchd_support_system;
static job_t _launchd_event_monitor;
static job_t _launchd_xpc_bootstrapper;
static job_t _launchd_shutdown_monitor;

#if TARGET_OS_EMBEDDED
static xpc_object_t _launchd_defaults_cache;

mach_port_t launchd_audit_port = MACH_PORT_DEAD;
pid_t launchd_audit_session = 0;
#else
mach_port_t launchd_audit_port = MACH_PORT_NULL;
au_asid_t launchd_audit_session = AU_DEFAUDITSID;
#endif

// Kept open to prevent unwanted unmounts/hangs; see usage elsewhere in the file.
static int s_no_hang_fd = -1;

// process wide globals
mach_port_t inherited_bootstrap_port;
jobmgr_t root_jobmgr;
bool launchd_shutdown_debugging = false;
bool launchd_verbose_boot = false;
bool launchd_embedded_handofgod = false;
bool launchd_runtime_busy_time = false;
void
917
job_ignore(job_t j)
918
{
919
struct socketgroup *sg;
920
struct machservice *ms;
921
922
if (j->currently_ignored) {
923
return;
924
}
925
926
job_log(j, LOG_DEBUG, "Ignoring...");
927
928
j->currently_ignored = true;
929
930
SLIST_FOREACH(sg, &j->sockets, sle) {
931
socketgroup_ignore(j, sg);
932
}
933
934
SLIST_FOREACH(ms, &j->machservices, sle) {
935
machservice_ignore(j, ms);
936
}
937
}
938
939
void
940
job_watch(job_t j)
941
{
942
struct socketgroup *sg;
943
struct machservice *ms;
944
945
if (!j->currently_ignored) {
946
return;
947
}
948
949
job_log(j, LOG_DEBUG, "Watching...");
950
951
j->currently_ignored = false;
952
953
SLIST_FOREACH(sg, &j->sockets, sle) {
954
socketgroup_watch(j, sg);
955
}
956
957
SLIST_FOREACH(ms, &j->machservices, sle) {
958
machservice_watch(j, ms);
959
}
960
}
961
962
/* Ask a running, managed job to exit.
 *
 * Prefers proc_terminate(), which picks an appropriate signal (SIGKILL for
 * clean processes, SIGTERM otherwise); falls back to a plain SIGTERM via
 * kill2() if that fails. Arms an exit-timeout timer so the job can be
 * force-killed if it does not exit in time. No-op for anonymous jobs,
 * jobs without a process, or jobs already stopped.
 */
void
job_stop(job_t j)
{
	int sig;

	if (unlikely(!j->p || j->stopped || j->anonymous)) {
		return;
	}

#if TARGET_OS_EMBEDDED
	/* "Hand of God" mode: only jobs belonging to the same user as the
	 * designated god job may be stopped; everything else is refused.
	 */
	if (launchd_embedded_handofgod && _launchd_embedded_god) {
		if (!_launchd_embedded_god->username || !j->username) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return;
	}
#endif

	// Record when the stop was requested so exit latency can be measured.
	j->sent_signal_time = runtime_get_opaque_time();

	job_log(j, LOG_DEBUG | LOG_CONSOLE, "Stopping job...");

	int error = -1;
	// proc_terminate() reports which signal it actually delivered via 'sig'.
	error = proc_terminate(j->p, &sig);
	if (error) {
		job_log(j, LOG_ERR | LOG_CONSOLE, "Could not terminate job: %d: %s", error, strerror(error));
		job_log(j, LOG_NOTICE | LOG_CONSOLE, "Using fallback option to terminate job...");
		error = kill2(j->p, SIGTERM);
		if (error) {
			job_log(j, LOG_ERR, "Could not signal job: %d: %s", error, strerror(error));
		} else {
			sig = SIGTERM;
		}
	}

	if (!error) {
		switch (sig) {
		case SIGKILL:
			j->sent_sigkill = true;
			j->clean_kill = true;

			/* We cannot effectively simulate an exit for jobs during the course
			 * of a normal run. Even if we pretend that the job exited, we will
			 * still not have gotten the receive rights associated with the
			 * job's MachServices back, so we cannot safely respawn it.
			 */
			if (j->mgr->shutting_down) {
				// One-shot timer to catch a SIGKILL'd job that never reaps.
				error = kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j);
				(void)job_assumes_zero_p(j, error);
			}

			job_log(j, LOG_DEBUG | LOG_CONSOLE, "Sent job SIGKILL.");
			break;
		case SIGTERM:
			if (j->exit_timeout) {
				// Escalate to SIGKILL if the job outlives its ExitTimeOut.
				error = kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, j->exit_timeout, j);
				(void)job_assumes_zero_p(j, error);
			} else {
				job_log(j, LOG_NOTICE, "This job has an infinite exit timeout");
			}
			job_log(j, LOG_DEBUG, "Sent job SIGTERM.");
			break;
		default:
			job_log(j, LOG_ERR | LOG_CONSOLE, "Job was sent unexpected signal: %d: %s", sig, strsignal(sig));
			break;
		}
	}

	j->stopped = true;
}
/* Export a job's state as a launch_data dictionary (the representation
 * consumed by `launchctl list` and friends).
 *
 * Returns a newly allocated dictionary the caller owns, or NULL if the
 * top-level allocation fails. Individual key insertions are best-effort:
 * a failed sub-allocation simply omits that key.
 */
launch_data_t
job_export(job_t j)
{
	launch_data_t tmp, tmp2, tmp3, r = launch_data_alloc(LAUNCH_DATA_DICTIONARY);

	if (r == NULL) {
		return NULL;
	}

	if ((tmp = launch_data_new_string(j->label))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LABEL);
	}
	if ((tmp = launch_data_new_string(j->mgr->name))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	}
	if ((tmp = launch_data_new_bool(j->ondemand))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ONDEMAND);
	}

	// FairPlay failures are reported via a sentinel exit status.
	long long status = j->last_exit_status;
	if (j->fpfail) {
		status = LAUNCH_EXITSTATUS_FAIRPLAY_FAIL;
	}
	if ((tmp = launch_data_new_integer(status))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_LASTEXITSTATUS);
	}

	// PID key is only present while the job has a live process.
	if (j->p && (tmp = launch_data_new_integer(j->p))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PID);
	}
	if ((tmp = launch_data_new_integer(j->timeout))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_TIMEOUT);
	}
	if (j->prog && (tmp = launch_data_new_string(j->prog))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAM);
	}
	if (j->stdinpath && (tmp = launch_data_new_string(j->stdinpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDINPATH);
	}
	if (j->stdoutpath && (tmp = launch_data_new_string(j->stdoutpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDOUTPATH);
	}
	if (j->stderrpath && (tmp = launch_data_new_string(j->stderrpath))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_STANDARDERRORPATH);
	}
	if (likely(j->argv) && (tmp = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
		size_t i;

		for (i = 0; i < j->argc; i++) {
			if ((tmp2 = launch_data_new_string(j->argv[i]))) {
				launch_data_array_set_index(tmp, tmp2, i);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_PROGRAMARGUMENTS);
	}

	if (j->enable_transactions && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_ENABLETRANSACTIONS);
	}

	if (j->session_create && (tmp = launch_data_new_bool(true))) {
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SESSIONCREATE);
	}

	if (j->inetcompat && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		if ((tmp2 = launch_data_new_bool(j->inetcompat_wait))) {
			launch_data_dict_insert(tmp, tmp2, LAUNCH_JOBINETDCOMPATIBILITY_WAIT);
		}
		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_INETDCOMPATIBILITY);
	}

	// Sockets: one array of fds per socket group, keyed by group name.
	if (!SLIST_EMPTY(&j->sockets) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct socketgroup *sg;
		unsigned int i;

		SLIST_FOREACH(sg, &j->sockets, sle) {
			if ((tmp2 = launch_data_alloc(LAUNCH_DATA_ARRAY))) {
				for (i = 0; i < sg->fd_cnt; i++) {
					if ((tmp3 = launch_data_new_fd(sg->fds[i]))) {
						launch_data_array_set_index(tmp2, tmp3, i);
					}
				}
				launch_data_dict_insert(tmp, tmp2, sg->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_SOCKETS);
	}

	/* MachServices: exported by name only — the ports themselves are not
	 * handed out here (MACH_PORT_NULL placeholders). Per-PID services go
	 * into a separate dictionary, allocated lazily.
	 */
	if (!SLIST_EMPTY(&j->machservices) && (tmp = launch_data_alloc(LAUNCH_DATA_DICTIONARY))) {
		struct machservice *ms;

		tmp3 = NULL;

		SLIST_FOREACH(ms, &j->machservices, sle) {
			if (ms->per_pid) {
				if (tmp3 == NULL) {
					tmp3 = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
				}
				if (tmp3) {
					tmp2 = launch_data_new_machport(MACH_PORT_NULL);
					launch_data_dict_insert(tmp3, tmp2, ms->name);
				}
			} else {
				tmp2 = launch_data_new_machport(MACH_PORT_NULL);
				launch_data_dict_insert(tmp, tmp2, ms->name);
			}
		}

		launch_data_dict_insert(r, tmp, LAUNCH_JOBKEY_MACHSERVICES);

		if (tmp3) {
			launch_data_dict_insert(r, tmp3, LAUNCH_JOBKEY_PERJOBMACHSERVICES);
		}
	}

	return r;
}
/* Recursively log every job that is still "active" in this job manager
 * and its submanagers, along with its dirty/idle-exit state when the
 * kernel tracks it. Used during shutdown to explain what launchd is
 * still waiting on. Logs go to the console too when running as PID 1.
 */
static void
jobmgr_log_active_jobs(jobmgr_t jm)
{
	const char *why_active;
	jobmgr_t jmi;
	job_t ji;

	// Depth-first: report submanagers before this manager's own jobs.
	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
		jobmgr_log_active_jobs(jmi);
	}

	int level = LOG_DEBUG;
	if (pid1_magic) {
		level |= LOG_CONSOLE;
	}

	LIST_FOREACH(ji, &jm->jobs, sle) {
		if ((why_active = job_active(ji))) {
			// Skip PID 1 itself (launchd's own anonymous job).
			if (ji->p != 1) {
				job_log(ji, level, "%s", why_active);

				// Only report killability for dirty-tracked processes.
				uint32_t flags = 0;
				(void)proc_get_dirty(ji->p, &flags);
				if (!(flags & PROC_DIRTY_TRACKED)) {
					continue;
				}

				char *dirty = "clean";
				if (flags & PROC_DIRTY_IS_DIRTY) {
					dirty = "dirty";
				}

				char *idle_exit = "idle-exit unsupported";
				if (flags & PROC_DIRTY_ALLOWS_IDLE_EXIT) {
					idle_exit = "idle-exit supported";
				}

				job_log(ji, level, "Killability: %s/%s", dirty, idle_exit);
			}
		}
	}
}
static void
1205
jobmgr_still_alive_with_check(jobmgr_t jm)
1206
{
1207
int level = LOG_DEBUG;
1208
if (pid1_magic) {
1209
level |= LOG_CONSOLE;
1210
}
1211
1212
jobmgr_log(jm, level, "Still alive with %lu/%lu (normal/anonymous) children.", total_children, total_anon_children);
1213
jobmgr_log_active_jobs(jm);
1214
launchd_log_push();
1215
}
1216
1217
/* Begin an orderly shutdown of a job manager and, recursively, all of
 * its submanagers. Records the shutdown start time, marks the manager
 * as shutting down, spawns the shutdown monitor for PID 1, and arms a
 * 5-second repeating heartbeat timer on the root manager.
 *
 * Returns the result of the first garbage-collection pass, which may
 * free 'jm' itself — callers must use the return value, not 'jm'.
 */
jobmgr_t
jobmgr_shutdown(jobmgr_t jm)
{
	jobmgr_t jmi, jmn;
	jobmgr_log(jm, LOG_DEBUG, "Beginning job manager shutdown with flags: %s", reboot_flags_to_C_names(jm->reboot_flags));

	jm->shutdown_time = runtime_get_wall_time() / USEC_PER_SEC;

	struct tm curtime;
	(void)localtime_r(&jm->shutdown_time, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	// Trim the new line that asctime_r(3) puts there for some reason.
	date[24] = 0;

	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown begun at: %s", date);
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown begun at: %s", date);
	}

	jm->shutting_down = true;

	// _SAFE variant: a submanager may be removed while we iterate.
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_shutdown(jmi);
	}

	if (!jm->parentmgr) {
		if (pid1_magic) {
			// Spawn the shutdown monitor.
			if (_launchd_shutdown_monitor && !_launchd_shutdown_monitor->p) {
				job_log(_launchd_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Starting shutdown monitor.");
				job_dispatch(_launchd_shutdown_monitor, true);
			}
		}

		// Heartbeat timer; fires jobmgr_still_alive_with_check() every 5s.
		(void)jobmgr_assumes_zero_p(jm, kevent_mod((uintptr_t)jm, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 5, jm));
	}

	return jobmgr_do_garbage_collection(jm);
}
/* Tear down a job manager: remove all submanagers and jobs, release its
 * Mach ports, log shutdown timing, and finally free it.
 *
 * For the root manager this is the end of the world: as PID 1 it calls
 * reboot(2); otherwise it exits the process. Frees 'jm' on all paths.
 */
void
jobmgr_remove(jobmgr_t jm)
{
	jobmgr_t jmi;
	job_t ji;

	jobmgr_log(jm, LOG_DEBUG, "Removing job manager.");
	if (!SLIST_EMPTY(&jm->submgrs)) {
		size_t cnt = 0;
		while ((jmi = SLIST_FIRST(&jm->submgrs))) {
			jobmgr_remove(jmi);
			cnt++;
		}

		// Submanagers should have been drained before removal; complain if not.
		(void)jobmgr_assumes_zero(jm, cnt);
	}

	while ((ji = LIST_FIRST(&jm->jobs))) {
		if (!ji->anonymous && ji->p != 0) {
			// Clear the PID so job_remove() doesn't pend on a live process.
			job_log(ji, LOG_ERR, "Job is still active at job manager teardown.");
			ji->p = 0;
		}

		job_remove(ji);
	}

	struct waiting4attach *w4ai = NULL;
	while ((w4ai = LIST_FIRST(&jm->attaches))) {
		waiting4attach_delete(jm, w4ai);
	}

	// Release the Mach rights this manager holds.
	if (jm->req_port) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_port));
	}
	if (jm->jm_port) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_close_recv(jm->jm_port));
	}

	if (jm->req_bsport) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_bsport));
	}
	if (jm->req_excport) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_excport));
	}
	if (MACH_PORT_VALID(jm->req_asport)) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_asport));
	}
	if (jm->req_rport) {
		kern_return_t kr = xpc_call_wakeup(jm->req_rport, jm->error);
		if (!(kr == KERN_SUCCESS || kr == MACH_SEND_INVALID_DEST)) {
			/* If the originator went away, the reply port will be a dead name,
			 * and we expect this to fail.
			 */
			(void)jobmgr_assumes_zero(jm, kr);
		}
	}
	if (jm->req_ctx) {
		(void)jobmgr_assumes_zero(jm, vm_deallocate(mach_task_self(), jm->req_ctx, jm->req_ctx_sz));
	}

	time_t ts = runtime_get_wall_time() / USEC_PER_SEC;
	struct tm curtime;
	(void)localtime_r(&ts, &curtime);

	char date[26];
	(void)asctime_r(&curtime, date);
	// Trim asctime_r(3)'s trailing newline.
	date[24] = 0;

	time_t delta = ts - jm->shutdown_time;
	if (jm == root_jobmgr && pid1_magic) {
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Userspace shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown finished at: %s", date);
		jobmgr_log(jm, LOG_DEBUG, "Job manager shutdown took approximately %ld second%s.", delta, (delta != 1) ? "s" : "");
	}

	if (jm->parentmgr) {
		runtime_del_weak_ref();
		SLIST_REMOVE(&jm->parentmgr->submgrs, jm, jobmgr_s, sle);

		// Hack for the guest user so that its stuff doesn't persist.
		//
		// <rdar://problem/14527875>
		if (strcmp(jm->name, VPROCMGR_SESSION_AQUA) == 0 && getuid() == 201) {
			raise(SIGTERM);
		}
	} else if (pid1_magic) {
		// Root manager as PID 1: this really is the end — reboot the machine.
		eliminate_double_reboot();
		launchd_log_vm_stats();
		jobmgr_log_stray_children(jm, true);
		jobmgr_log(root_jobmgr, LOG_NOTICE | LOG_CONSOLE, "About to call: reboot(%s).", reboot_flags_to_C_names(jm->reboot_flags));
		launchd_closelog();
		(void)jobmgr_assumes_zero_p(jm, reboot(jm->reboot_flags));
	} else {
		// Root manager of a per-user launchd: just exit.
		jobmgr_log(jm, LOG_DEBUG, "About to exit");
		launchd_closelog();
		exit(EXIT_SUCCESS);
	}

	free(jm);
}
/* Remove a job from its manager and free it.
 *
 * If the job still has a live, managed process, removal is deferred:
 * the job is marked removal_pending, stopped, and actually removed when
 * it exits. Aliases take a short path that only drops their machservice
 * references. Otherwise every owned resource — sockets, calendar
 * intervals, env items, limits, machservices, semaphores, watchers,
 * events, strings, timers, ports — is torn down before free(j).
 */
void
job_remove(job_t j)
{
	struct waiting_for_removal *w4r;
	struct calendarinterval *ci;
	struct semaphoreitem *si;
	struct socketgroup *sg;
	struct machservice *ms;
	struct limititem *li;
	struct envitem *ei;

	if (j->alias) {
		/* HACK: Egregious code duplication. But as with machservice_delete(),
		 * job aliases can't (and shouldn't) have any complex behaviors
		 * associated with them.
		 */
		while ((ms = SLIST_FIRST(&j->machservices))) {
			machservice_delete(j, ms, false);
		}

		LIST_REMOVE(j, sle);
		LIST_REMOVE(j, label_hash_sle);
		free(j);
		return;
	}

#if TARGET_OS_EMBEDDED
	/* "Hand of God" mode: only jobs owned by the god job's user may be
	 * removed; see the matching check in job_stop().
	 */
	if (launchd_embedded_handofgod && _launchd_embedded_god) {
		if (!(_launchd_embedded_god->username && j->username)) {
			errno = EPERM;
			return;
		}

		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
			errno = EPERM;
			return;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return;
	}
#endif

	/* Do this BEFORE we check and see whether the job is still active. If we're
	 * a sub-job, we're being removed due to the parent job removing us.
	 * Therefore, the parent job will free itself after this call completes. So
	 * if we defer removing ourselves from the parent's list, we'll crash when
	 * we finally get around to it.
	 */
	if (j->dedicated_instance && !j->former_subjob) {
		LIST_REMOVE(j, subjob_sle);
		j->former_subjob = true;
	}

	if (unlikely(j->p)) {
		if (j->anonymous) {
			job_reap(j);
		} else {
			// Managed process still alive: stop it and finish removal on exit.
			job_log(j, LOG_DEBUG, "Removal pended until the job exits");

			if (!j->removal_pending) {
				j->removal_pending = true;
				job_stop(j);
			}

			return;
		}
	}

	if (!j->removing) {
		j->removing = true;
		job_dispatch_curious_jobs(j);
	}

	ipc_close_all_with_job(j);

	if (j->forced_peers_to_demand_mode) {
		job_set_global_on_demand(j, false);
	}

	// NOTE(review): this logs an "assumes" failure whenever fork_fd is
	// non-zero before closing it; a plain `if (j->fork_fd)` guard (as used
	// for stdin_fd below) may have been intended — verify against upstream.
	if (job_assumes_zero(j, j->fork_fd)) {
		(void)posix_assumes_zero(runtime_close(j->fork_fd));
	}

	if (j->stdin_fd) {
		(void)posix_assumes_zero(runtime_close(j->stdin_fd));
	}

	if (j->j_port) {
		(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
	}

	// Drain every per-job resource list.
	while ((sg = SLIST_FIRST(&j->sockets))) {
		socketgroup_delete(j, sg);
	}
	while ((ci = SLIST_FIRST(&j->cal_intervals))) {
		calendarinterval_delete(j, ci);
	}
	while ((ei = SLIST_FIRST(&j->env))) {
		envitem_delete(j, ei, false);
	}
	while ((ei = SLIST_FIRST(&j->global_env))) {
		envitem_delete(j, ei, true);
	}
	while ((li = SLIST_FIRST(&j->limits))) {
		limititem_delete(j, li);
	}
	while ((ms = SLIST_FIRST(&j->machservices))) {
		machservice_delete(j, ms, false);
	}
	while ((si = SLIST_FIRST(&j->semaphores))) {
		semaphoreitem_delete(j, si);
	}
	while ((w4r = SLIST_FIRST(&j->removal_watchers))) {
		waiting4removal_delete(j, w4r);
	}

	struct externalevent *eei = NULL;
	while ((eei = LIST_FIRST(&j->events))) {
		externalevent_delete(eei);
	}

	// Clear the file-local singleton pointers this job may occupy.
	if (j->event_monitor) {
		_launchd_event_monitor = NULL;
	}
	if (j->xpc_bootstrapper) {
		_launchd_xpc_bootstrapper = NULL;
	}

	// Free owned strings and buffers.
	if (j->prog) {
		free(j->prog);
	}
	if (j->argv) {
		free(j->argv);
	}
	if (j->rootdir) {
		free(j->rootdir);
	}
	if (j->workingdir) {
		free(j->workingdir);
	}
	if (j->username) {
		free(j->username);
	}
	if (j->groupname) {
		free(j->groupname);
	}
	if (j->stdinpath) {
		free(j->stdinpath);
	}
	if (j->stdoutpath) {
		free(j->stdoutpath);
	}
	if (j->stderrpath) {
		free(j->stderrpath);
	}
	if (j->alt_exc_handler) {
		free(j->alt_exc_handler);
	}
	if (j->cfbundleidentifier) {
		free(j->cfbundleidentifier);
	}
#if HAVE_SANDBOX
	if (j->seatbelt_profile) {
		free(j->seatbelt_profile);
	}
	if (j->container_identifier) {
		free(j->container_identifier);
	}
#endif
#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		free(j->quarantine_data);
	}
#endif
	if (j->j_binpref) {
		free(j->j_binpref);
	}
	// Disarm timers and release remaining kernel objects.
	if (j->start_interval) {
		runtime_del_weak_ref();
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL));
	}
	if (j->exit_timeout) {
		/* If this fails, it just means the timer's already fired, so no need to
		 * wrap it in an assumes() macro.
		 */
		(void)kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
	}
	if (j->asport != MACH_PORT_NULL) {
		(void)job_assumes_zero(j, launchd_mport_deallocate(j->asport));
	}
	if (!uuid_is_null(j->expected_audit_uuid)) {
		LIST_REMOVE(j, needing_session_sle);
	}
	if (j->embedded_god) {
		_launchd_embedded_god = NULL;
	}
	if (j->embedded_home) {
		_launchd_embedded_home = NULL;
	}
	if (j->shutdown_monitor) {
		_launchd_shutdown_monitor = NULL;
	}

	(void)kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);

	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, label_hash_sle);

	// Sub-jobs die with their parent.
	job_t ji = NULL;
	job_t jit = NULL;
	LIST_FOREACH_SAFE(ji, &j->subjobs, subjob_sle, jit) {
		job_remove(ji);
	}

	job_log(j, LOG_DEBUG, "Removed");

	// Poison the callback so a stale kevent on this job crashes loudly.
	j->kqjob_callback = (kq_callback)0x8badf00d;
	free(j);
}
void
1585
socketgroup_setup(launch_data_t obj, const char *key, void *context)
1586
{
1587
launch_data_t tmp_oai;
1588
job_t j = context;
1589
size_t i, fd_cnt = 1;
1590
int *fds;
1591
1592
if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
1593
fd_cnt = launch_data_array_get_count(obj);
1594
}
1595
1596
fds = alloca(fd_cnt * sizeof(int));
1597
1598
for (i = 0; i < fd_cnt; i++) {
1599
if (launch_data_get_type(obj) == LAUNCH_DATA_ARRAY) {
1600
tmp_oai = launch_data_array_get_index(obj, i);
1601
} else {
1602
tmp_oai = obj;
1603
}
1604
1605
fds[i] = launch_data_get_fd(tmp_oai);
1606
}
1607
1608
socketgroup_new(j, key, fds, fd_cnt);
1609
1610
ipc_revoke_fds(obj);
1611
}
1612
1613
bool
1614
job_set_global_on_demand(job_t j, bool val)
1615
{
1616
if (j->forced_peers_to_demand_mode && val) {
1617
return false;
1618
} else if (!j->forced_peers_to_demand_mode && !val) {
1619
return false;
1620
}
1621
1622
if ((j->forced_peers_to_demand_mode = val)) {
1623
j->mgr->global_on_demand_cnt++;
1624
} else {
1625
j->mgr->global_on_demand_cnt--;
1626
}
1627
1628
if (j->mgr->global_on_demand_cnt == 0) {
1629
jobmgr_dispatch_all(j->mgr, false);
1630
}
1631
1632
return true;
1633
}
1634
1635
/* Allocate and wire up the job's bootstrap port: create a receive right,
 * register it with the runtime's port set, and request a no-senders
 * notification. On any failure the receive right is closed and false is
 * returned; returns true on success.
 */
bool
job_setup_machport(job_t j)
{
	if (job_assumes_zero(j, launchd_mport_create_recv(&j->j_port)) != KERN_SUCCESS) {
		return false;
	}

	if (job_assumes_zero(j, runtime_add_mport(j->j_port, job_server)) != KERN_SUCCESS) {
		(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
		return false;
	}

	// No-senders tells us when the last client of this port goes away.
	if (job_assumes_zero(j, launchd_mport_notify_req(j->j_port, MACH_NOTIFY_NO_SENDERS)) != KERN_SUCCESS) {
		(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
		return false;
	}

	return true;
}
/* Create the Mach port used to convey the job's exit status.
 *
 * Allocates a receive right into j->exit_status_port, caps its queue at a
 * single message (only one exit status will ever be sent), and stashes a
 * send-once right in j->exit_status_dest.
 *
 * Returns KERN_SUCCESS, or the failing Mach error code.
 */
kern_return_t
job_setup_exit_port(job_t j)
{
	kern_return_t kr = launchd_mport_create_recv(&j->exit_status_port);
	if (job_assumes_zero(j, kr) != KERN_SUCCESS) {
		/* BUGFIX: previously returned MACH_PORT_NULL here, which is 0 and
		 * therefore indistinguishable from KERN_SUCCESS to any caller that
		 * checks the kern_return_t. Propagate the real error instead.
		 */
		return kr;
	}

	// Only one exit-status message will ever be queued on this port.
	struct mach_port_limits limits = {
		.mpl_qlimit = 1,
	};
	kr = mach_port_set_attributes(mach_task_self(), j->exit_status_port, MACH_PORT_LIMITS_INFO, (mach_port_info_t)&limits, sizeof(limits));
	(void)job_assumes_zero(j, kr);

	kr = launchd_mport_make_send_once(j->exit_status_port, &j->exit_status_dest);
	if (job_assumes_zero(j, kr) != KERN_SUCCESS) {
		// Roll back: close the receive right so we don't leak it.
		(void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
		j->exit_status_port = MACH_PORT_NULL;
	}

	return kr;
}
job_t
1682
job_new_via_mach_init(job_t j, const char *cmd, uid_t uid, bool ond)
1683
{
1684
const char **argv = (const char **)mach_cmd2argv(cmd);
1685
job_t jr = NULL;
1686
1687
if (!argv) {
1688
goto out_bad;
1689
}
1690
1691
jr = job_new(j->mgr, AUTO_PICK_LEGACY_LABEL, NULL, argv);
1692
free(argv);
1693
1694
// Job creation can be denied during shutdown.
1695
if (unlikely(jr == NULL)) {
1696
goto out_bad;
1697
}
1698
1699
jr->mach_uid = uid;
1700
jr->ondemand = ond;
1701
jr->legacy_mach_job = true;
1702
jr->abandon_pg = true;
1703
jr->priv_port_has_senders = true; // the IPC that called us will make-send on this port
1704
1705
if (!job_setup_machport(jr)) {
1706
goto out_bad;
1707
}
1708
1709
job_log(jr, LOG_INFO, "Legacy%s server created", ond ? " on-demand" : "");
1710
1711
return jr;
1712
1713
out_bad:
1714
if (jr) {
1715
job_remove(jr);
1716
}
1717
return NULL;
1718
}
1719
1720
/* Create an "anonymous" job wrapping a process launchd did not spawn.
 *
 * Validates the PID against libproc, sanity-checks its credentials and
 * parentage (including ptrace(3)-induced cycles), creates the job, and
 * registers a kevent to track exec/fork/exit. Returns the new job or
 * NULL with errno set. Works even while the manager is shutting down by
 * temporarily clearing the shutting_down flag.
 */
job_t
job_new_anonymous(jobmgr_t jm, pid_t anonpid)
{
	struct proc_bsdshortinfo proc;
	bool shutdown_state;
	job_t jp = NULL, jr = NULL;
	uid_t kp_euid, kp_uid, kp_svuid;
	gid_t kp_egid, kp_gid, kp_svgid;

	if (anonpid == 0) {
		errno = EINVAL;
		return NULL;
	}

	if (anonpid >= 100000) {
		/* The kernel current defines PID_MAX to be 99999, but that define isn't
		 * exported.
		 */
		launchd_syslog(LOG_WARNING, "Did PID_MAX change? Got request from PID: %d", anonpid);
		errno = EINVAL;
		return NULL;
	}

	/* libproc returns the number of bytes written into the buffer upon success,
	 * zero on failure. I'd much rather it return -1 on failure, like sysctl(3).
	 */
	if (proc_pidinfo(anonpid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
		if (errno != ESRCH) {
			(void)jobmgr_assumes_zero(jm, errno);
		}
		return NULL;
	}

	if (proc.pbsi_comm[0] == '\0') {
		launchd_syslog(LOG_WARNING, "Blank command for PID: %d", anonpid);
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(proc.pbsi_status == SZOMB)) {
		jobmgr_log(jm, LOG_DEBUG, "Tried to create an anonymous job for zombie PID %u: %s", anonpid, proc.pbsi_comm);
	}

	if (unlikely(proc.pbsi_flags & P_SUGID)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: P_SUGID is set on PID %u: %s", anonpid, proc.pbsi_comm);
	}

	kp_euid = proc.pbsi_uid;
	kp_uid = proc.pbsi_ruid;
	kp_svuid = proc.pbsi_svuid;
	kp_egid = proc.pbsi_gid;
	kp_gid = proc.pbsi_rgid;
	kp_svgid = proc.pbsi_svgid;

	// Mixed effective/real/saved IDs are suspicious but not fatal; just log.
	if (unlikely(kp_euid != kp_uid || kp_euid != kp_svuid || kp_uid != kp_svuid || kp_egid != kp_gid || kp_egid != kp_svgid || kp_gid != kp_svgid)) {
		jobmgr_log(jm, LOG_DEBUG, "Inconsistency: Mixed credentials (e/r/s UID %u/%u/%u GID %u/%u/%u) detected on PID %u: %s",
				kp_euid, kp_uid, kp_svuid, kp_egid, kp_gid, kp_svgid, anonpid, proc.pbsi_comm);
	}

	/* "Fix" for when the kernel turns the process tree into a weird, cyclic
	 * graph.
	 *
	 * See <rdar://problem/7264615> for the symptom and <rdar://problem/5020256>
	 * as to why this can happen.
	 */
	if ((pid_t)proc.pbsi_ppid == anonpid) {
		jobmgr_log(jm, LOG_WARNING, "Process has become its own parent through ptrace(3). Ignoring: %s", proc.pbsi_comm);
		errno = EINVAL;
		return NULL;
	}

	/* HACK: Normally, job_new() returns an error during shutdown, but anonymous
	 * jobs can pop up during shutdown and need to talk to us.
	 */
	if (unlikely(shutdown_state = jm->shutting_down)) {
		jm->shutting_down = false;
	}

	// We only set requestor_pid for XPC domains.
	const char *whichlabel = (jm->req_pid == anonpid) ? AUTO_PICK_XPC_LABEL : AUTO_PICK_ANONYMOUS_LABEL;
	if ((jr = job_new(jm, whichlabel, proc.pbsi_comm, NULL))) {
		u_int proc_fflags = NOTE_EXEC|NOTE_FORK|NOTE_EXIT;

		total_anon_children++;
		jr->anonymous = true;
		jr->p = anonpid;

		// Anonymous process reaping is messy.
		LIST_INSERT_HEAD(&jm->active_jobs[ACTIVE_JOB_HASH(jr->p)], jr, pid_hash_sle);

		if (unlikely(kevent_mod(jr->p, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr) == -1)) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(jr, errno);
			}

			// Zombies interact weirdly with kevent(3).
			job_log(jr, LOG_ERR, "Failed to add kevent for PID %u. Will unload at MIG return", jr->p);
			jr->unload_at_mig_return = true;
		}

		if (unlikely(shutdown_state)) {
			job_log(jr, LOG_APPLEONLY, "This process showed up to the party while all the guests were leaving. Odds are that it will have a miserable time.");
		}

		job_log(jr, LOG_DEBUG, "Created PID %u anonymously by PPID %u%s%s", anonpid, proc.pbsi_ppid, jp ? ": " : "", jp ? jp->label : "");
	} else {
		(void)os_assumes_zero(errno);
	}

	// Undo our hack from above.
	if (unlikely(shutdown_state)) {
		jm->shutting_down = true;
	}

	/* This is down here to prevent infinite recursion due to a process
	 * attaching to its parent through ptrace(3) -- causing a cycle in the
	 * process tree and thereby not making it a tree anymore. We need to make
	 * sure that the anonymous job has been added to the process list so that
	 * we'll find the tracing parent PID of the parent process, which is the
	 * child, when we go looking for it in jobmgr_find_by_pid().
	 *
	 * <rdar://problem/7264615>
	 */
	switch (proc.pbsi_ppid) {
	case 0:
		// The kernel.
		break;
	case 1:
		if (!pid1_magic) {
			break;
		}
		// Fall through.
	default:
		jp = jobmgr_find_by_pid(jm, proc.pbsi_ppid, true);
		if (jobmgr_assumes(jm, jp != NULL)) {
			if (jp && !jp->anonymous && unlikely(!(proc.pbsi_flags & P_EXEC))) {
				job_log(jp, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", proc.pbsi_pid);
			}
		}
		break;
	}

	return jr;
}
job_t
1866
job_new_subjob(job_t j, uuid_t identifier)
1867
{
1868
char label[0];
1869
uuid_string_t idstr;
1870
uuid_unparse(identifier, idstr);
1871
size_t label_sz = snprintf(label, 0, "%s.%s", j->label, idstr);
1872
1873
job_t nj = (struct job_s *)calloc(1, sizeof(struct job_s) + label_sz + 1);
1874
if (nj != NULL) {
1875
nj->kqjob_callback = job_callback;
1876
nj->original = j;
1877
nj->mgr = j->mgr;
1878
nj->min_run_time = j->min_run_time;
1879
nj->timeout = j->timeout;
1880
nj->exit_timeout = j->exit_timeout;
1881
1882
snprintf((char *)nj->label, label_sz + 1, "%s.%s", j->label, idstr);
1883
1884
// Set all our simple Booleans that are applicable.
1885
nj->debug = j->debug;
1886
nj->ondemand = j->ondemand;
1887
nj->checkedin = true;
1888
nj->low_pri_io = j->low_pri_io;
1889
nj->setmask = j->setmask;
1890
nj->wait4debugger = j->wait4debugger;
1891
nj->internal_exc_handler = j->internal_exc_handler;
1892
nj->setnice = j->setnice;
1893
nj->abandon_pg = j->abandon_pg;
1894
nj->ignore_pg_at_shutdown = j->ignore_pg_at_shutdown;
1895
nj->deny_job_creation = j->deny_job_creation;
1896
nj->enable_transactions = j->enable_transactions;
1897
nj->needs_kickoff = j->needs_kickoff;
1898
nj->currently_ignored = true;
1899
nj->dedicated_instance = true;
1900
nj->xpc_service = j->xpc_service;
1901
nj->xpc_bootstrapper = j->xpc_bootstrapper;
1902
nj->jetsam_priority = j->jetsam_priority;
1903
nj->jetsam_memlimit = j->jetsam_memlimit;
1904
nj->psproctype = j->psproctype;
1905
1906
nj->mask = j->mask;
1907
uuid_copy(nj->instance_id, identifier);
1908
1909
// These jobs are purely on-demand Mach jobs.
1910
// {Hard | Soft}ResourceLimits are not supported.
1911
// JetsamPriority is not supported.
1912
1913
if (j->prog) {
1914
nj->prog = strdup(j->prog);
1915
}
1916
if (j->argv) {
1917
size_t sz = malloc_size(j->argv);
1918
nj->argv = (char **)malloc(sz);
1919
if (nj->argv != NULL) {
1920
// This is the start of our strings.
1921
char *p = ((char *)nj->argv) + ((j->argc + 1) * sizeof(char *));
1922
1923
size_t i = 0;
1924
for (i = 0; i < j->argc; i++) {
1925
(void)strcpy(p, j->argv[i]);
1926
nj->argv[i] = p;
1927
p += (strlen(j->argv[i]) + 1);
1928
}
1929
nj->argv[i] = NULL;
1930
} else {
1931
(void)job_assumes_zero(nj, errno);
1932
}
1933
1934
nj->argc = j->argc;
1935
}
1936
1937
struct machservice *msi = NULL;
1938
SLIST_FOREACH(msi, &j->machservices, sle) {
1939
/* Only copy MachServices that were actually declared in the plist.
1940
* So skip over per-PID ones and ones that were created via
1941
* bootstrap_register().
1942
*/
1943
if (msi->upfront) {
1944
mach_port_t mp = MACH_PORT_NULL;
1945
struct machservice *msj = machservice_new(nj, msi->name, &mp, false);
1946
if (msj != NULL) {
1947
msj->reset = msi->reset;
1948
msj->delete_on_destruction = msi->delete_on_destruction;
1949
msj->drain_one_on_crash = msi->drain_one_on_crash;
1950
msj->drain_all_on_crash = msi->drain_all_on_crash;
1951
1952
kern_return_t kr = mach_port_set_attributes(mach_task_self(), msj->port, MACH_PORT_TEMPOWNER, NULL, 0);
1953
(void)job_assumes_zero(j, kr);
1954
} else {
1955
(void)job_assumes_zero(nj, errno);
1956
}
1957
}
1958
}
1959
1960
// We ignore global environment variables.
1961
struct envitem *ei = NULL;
1962
SLIST_FOREACH(ei, &j->env, sle) {
1963
if (envitem_new(nj, ei->key, ei->value, false)) {
1964
(void)job_assumes_zero(nj, errno);
1965
}
1966
}
1967
uuid_string_t val;
1968
uuid_unparse(identifier, val);
1969
if (envitem_new(nj, LAUNCH_ENV_INSTANCEID, val, false)) {
1970
(void)job_assumes_zero(nj, errno);
1971
}
1972
1973
if (j->rootdir) {
1974
nj->rootdir = strdup(j->rootdir);
1975
}
1976
if (j->workingdir) {
1977
nj->workingdir = strdup(j->workingdir);
1978
}
1979
if (j->username) {
1980
nj->username = strdup(j->username);
1981
}
1982
if (j->groupname) {
1983
nj->groupname = strdup(j->groupname);
1984
}
1985
1986
/* FIXME: We shouldn't redirect all the output from these jobs to the
1987
* same file. We should uniquify the file names. But this hasn't shown
1988
* to be a problem in practice.
1989
*/
1990
if (j->stdinpath) {
1991
nj->stdinpath = strdup(j->stdinpath);
1992
}
1993
if (j->stdoutpath) {
1994
nj->stdoutpath = strdup(j->stdinpath);
1995
}
1996
if (j->stderrpath) {
1997
nj->stderrpath = strdup(j->stderrpath);
1998
}
1999
if (j->alt_exc_handler) {
2000
nj->alt_exc_handler = strdup(j->alt_exc_handler);
2001
}
2002
if (j->cfbundleidentifier) {
2003
nj->cfbundleidentifier = strdup(j->cfbundleidentifier);
2004
}
2005
#if HAVE_SANDBOX
2006
if (j->seatbelt_profile) {
2007
nj->seatbelt_profile = strdup(j->seatbelt_profile);
2008
}
2009
if (j->container_identifier) {
2010
nj->container_identifier = strdup(j->container_identifier);
2011
}
2012
#endif
2013
2014
#if HAVE_QUARANTINE
2015
if (j->quarantine_data) {
2016
nj->quarantine_data = strdup(j->quarantine_data);
2017
}
2018
nj->quarantine_data_sz = j->quarantine_data_sz;
2019
#endif
2020
if (j->j_binpref) {
2021
size_t sz = malloc_size(j->j_binpref);
2022
nj->j_binpref = (cpu_type_t *)malloc(sz);
2023
if (nj->j_binpref) {
2024
memcpy(&nj->j_binpref, &j->j_binpref, sz);
2025
} else {
2026
(void)job_assumes_zero(nj, errno);
2027
}
2028
}
2029
2030
if (j->asport != MACH_PORT_NULL) {
2031
(void)job_assumes_zero(nj, launchd_mport_copy_send(j->asport));
2032
nj->asport = j->asport;
2033
}
2034
2035
LIST_INSERT_HEAD(&nj->mgr->jobs, nj, sle);
2036
2037
jobmgr_t where2put = root_jobmgr;
2038
if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
2039
where2put = j->mgr;
2040
}
2041
LIST_INSERT_HEAD(&where2put->label_hash[hash_label(nj->label)], nj, label_hash_sle);
2042
LIST_INSERT_HEAD(&j->subjobs, nj, subjob_sle);
2043
} else {
2044
(void)os_assumes_zero(errno);
2045
}
2046
2047
return nj;
2048
}
2049
2050
/* Allocate and initialize a new job in job manager `jm`.
 *
 * `label` may be a real label, or one of the sentinels
 * AUTO_PICK_LEGACY_LABEL / AUTO_PICK_ANONYMOUS_LABEL / AUTO_PICK_XPC_LABEL,
 * in which case a label is synthesized. At least one of `prog`/`argv`
 * must be non-NULL. Returns NULL with errno set on failure.
 */
job_t
job_new(jobmgr_t jm, const char *label, const char *prog, const char *const *argv)
{
	const char *const *argv_tmp = argv;
	char tmp_path[PATH_MAX];
	char auto_label[1000];
	const char *bn = NULL;
	char *co;
	size_t minlabel_len;
	size_t i, cc = 0;
	job_t j;

	// The kevent machinery treats the job pointer as a pointer to its
	// callback, so the callback must be the first member.
	__OS_COMPILETIME_ASSERT__(offsetof(struct job_s, kqjob_callback) == 0);

	if (unlikely(jm->shutting_down)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(prog == NULL && argv == NULL)) {
		errno = EINVAL;
		return NULL;
	}

	/* I'd really like to redo this someday. Anonymous jobs carry all the
	 * baggage of managed jobs with them, even though most of it is unused.
	 * Maybe when we have Objective-C objects in libSystem, there can be a base
	 * job type that anonymous and managed jobs inherit from...
	 */
	char *anon_or_legacy = (label == AUTO_PICK_ANONYMOUS_LABEL) ? "anonymous" : "mach_init";
	if (unlikely(label == AUTO_PICK_LEGACY_LABEL || label == AUTO_PICK_ANONYMOUS_LABEL)) {
		if (prog) {
			bn = prog;
		} else {
			// basename(3) may modify its argument, so work on a copy.
			strlcpy(tmp_path, argv[0], sizeof(tmp_path));
			// prog for auto labels is kp.kp_kproc.p_comm.
			bn = basename(tmp_path);
		}

		/* The hex placeholder is sized like a pointer; it is re-rendered
		 * below with the real job address once `j` exists.
		 */
		(void)snprintf(auto_label, sizeof(auto_label), "%s.%s.%s", sizeof(void *) == 8 ? "0xdeadbeeffeedface" : "0xbabecafe", anon_or_legacy, bn);
		label = auto_label;
		/* This is so we can do gross things later. See NOTE_EXEC for anonymous
		 * jobs.
		 */
		minlabel_len = strlen(label) + MAXCOMLEN;
	} else {
		if (label == AUTO_PICK_XPC_LABEL) {
			minlabel_len = snprintf(auto_label, sizeof(auto_label), "com.apple.xpc.domain-owner.%s", jm->owner);
		} else {
			minlabel_len = strlen(label);
		}
	}

	// The label is stored in the flexible space trailing struct job_s.
	j = calloc(1, sizeof(struct job_s) + minlabel_len + 1);

	if (!j) {
		(void)os_assumes_zero(errno);
		return NULL;
	}

	if (unlikely(label == auto_label)) {
		// Re-render with the job's own address as the unique prefix.
		(void)snprintf((char *)j->label, strlen(label) + 1, "%p.%s.%s", j, anon_or_legacy, bn);
	} else {
		(void)strcpy((char *)j->label, (label == AUTO_PICK_XPC_LABEL) ? auto_label : label);
	}

	// Defaults for a freshly-conceived job.
	j->kqjob_callback = job_callback;
	j->mgr = jm;
	j->min_run_time = LAUNCHD_MIN_JOB_RUN_TIME;
	j->timeout = RUNTIME_ADVISABLE_IDLE_TIMEOUT;
	j->exit_timeout = LAUNCHD_DEFAULT_EXIT_TIMEOUT;
	j->currently_ignored = true;
	j->ondemand = true;
	j->checkedin = true;
	j->jetsam_priority = DEFAULT_JETSAM_PRIORITY;
	j->jetsam_memlimit = -1;
	uuid_clear(j->expected_audit_uuid);
#if TARGET_OS_EMBEDDED
	/* Run embedded daemons as background by default. SpringBoard jobs are
	 * Interactive by default. Unfortunately, so many daemons have opted into
	 * this priority band that its usefulness is highly questionable.
	 *
	 * See <rdar://problem/9539873>.
	 *
	 * Also ensure that daemons have a default memory highwatermark unless
	 * otherwise specified, as per <rdar://problem/10307814>.
	 */
	if (launchd_embedded_handofgod) {
		j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
		j->app = true;
	} else {
		j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND;
		j->jetsam_memlimit = DEFAULT_JETSAM_DAEMON_HIGHWATERMARK;
	}
#else
	/* Jobs on OS X that just come from disk are "standard" by default so that
	 * third-party daemons/agents don't encounter unexpected throttling.
	 */
	j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD;
#endif

	if (prog) {
		j->prog = strdup(prog);
		if (!j->prog) {
			(void)os_assumes_zero(errno);
			goto out_bad;
		}
	}

	if (likely(argv)) {
		while (*argv_tmp++) {
			j->argc++;
		}

		// Total bytes needed for the packed, NUL-terminated strings.
		for (i = 0; i < j->argc; i++) {
			cc += strlen(argv[i]) + 1;
		}

		/* Single allocation: NULL-terminated pointer vector, immediately
		 * followed by the string bytes the pointers refer to.
		 */
		j->argv = malloc((j->argc + 1) * sizeof(char *) + cc);
		if (!j->argv) {
			(void)job_assumes_zero(j, errno);
			goto out_bad;
		}

		co = ((char *)j->argv) + ((j->argc + 1) * sizeof(char *));

		for (i = 0; i < j->argc; i++) {
			j->argv[i] = co;
			(void)strcpy(co, argv[i]);
			co += strlen(argv[i]) + 1;
		}
		j->argv[i] = NULL;
	}

	// Sssshhh... don't tell anyone.
	if (strcmp(j->label, "com.apple.WindowServer") == 0) {
		j->has_console = true;
	}

	LIST_INSERT_HEAD(&jm->jobs, j, sle);

	// XPC domains hash labels locally; everything else goes in the root.
	jobmgr_t where2put_label = root_jobmgr;
	if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
		where2put_label = j->mgr;
	}
	LIST_INSERT_HEAD(&where2put_label->label_hash[hash_label(j->label)], j, label_hash_sle);
	uuid_clear(j->expected_audit_uuid);

	job_log(j, LOG_DEBUG, "Conceived");

	return j;

out_bad:
	// Only reached before the job is linked into any list, so a plain
	// free of the pieces allocated so far is sufficient.
	if (j->prog) {
		free(j->prog);
	}
	free(j);

	return NULL;
}
2210
2211
job_t
2212
job_new_alias(jobmgr_t jm, job_t src)
2213
{
2214
if (job_find(jm, src->label)) {
2215
errno = EEXIST;
2216
return NULL;
2217
}
2218
2219
job_t j = calloc(1, sizeof(struct job_s) + strlen(src->label) + 1);
2220
if (!j) {
2221
(void)os_assumes_zero(errno);
2222
return NULL;
2223
}
2224
2225
(void)strcpy((char *)j->label, src->label);
2226
LIST_INSERT_HEAD(&jm->jobs, j, sle);
2227
LIST_INSERT_HEAD(&jm->label_hash[hash_label(j->label)], j, label_hash_sle);
2228
/* Bad jump address. The kqueue callback for aliases should never be
2229
* invoked.
2230
*/
2231
j->kqjob_callback = (kq_callback)0xfa1afe1;
2232
j->alias = src;
2233
j->mgr = jm;
2234
2235
struct machservice *msi = NULL;
2236
SLIST_FOREACH(msi, &src->machservices, sle) {
2237
if (!machservice_new_alias(j, msi)) {
2238
jobmgr_log(jm, LOG_ERR, "Failed to alias job: %s", src->label);
2239
errno = EINVAL;
2240
job_remove(j);
2241
j = NULL;
2242
break;
2243
}
2244
}
2245
2246
if (j) {
2247
job_log(j, LOG_DEBUG, "Aliased service into domain: %s", jm->name);
2248
}
2249
2250
return j;
2251
}
2252
2253
job_t
2254
job_import(launch_data_t pload)
2255
{
2256
#if TARGET_OS_EMBEDDED
2257
/* If this is the special payload of default values, handle it here */
2258
if (unlikely(launch_data_dict_lookup(pload, LAUNCH_JOBKEY_DEFAULTS))) {
2259
job_import_defaults(pload);
2260
return NULL;
2261
}
2262
#endif
2263
2264
job_t j = jobmgr_import2(root_jobmgr, pload);
2265
2266
if (unlikely(j == NULL)) {
2267
return NULL;
2268
}
2269
2270
/* Since jobs are effectively stalled until they get security sessions
2271
* assigned to them, we may wish to reconsider this behavior of calling the
2272
* job "enabled" as far as other jobs with the OtherJobEnabled KeepAlive
2273
* criterion set.
2274
*/
2275
job_dispatch_curious_jobs(j);
2276
return job_dispatch(j, false);
2277
}
2278
2279
#if TARGET_OS_EMBEDDED
2280
2281
bool
2282
job_import_defaults(launch_data_t pload)
2283
{
2284
bool result = false;
2285
xpc_object_t xd = NULL, defaults;
2286
2287
if (_launchd_defaults_cache) {
2288
xpc_release(_launchd_defaults_cache);
2289
_launchd_defaults_cache = NULL;
2290
}
2291
2292
xd = ld2xpc(pload);
2293
if (!xd || xpc_get_type(xd) != XPC_TYPE_DICTIONARY) {
2294
goto out;
2295
}
2296
2297
defaults = xpc_dictionary_get_value(xd, LAUNCHD_JOB_DEFAULTS);
2298
if (!defaults || xpc_get_type(defaults) != XPC_TYPE_DICTIONARY) {
2299
goto out;
2300
}
2301
2302
_launchd_defaults_cache = xpc_copy(defaults);
2303
result = true;
2304
out:
2305
if (xd) {
2306
xpc_release(xd);
2307
}
2308
2309
return result;
2310
}
2311
2312
/* Apply cached default properties (imported via job_import_defaults) to
 * job `j`, matching on its label. Also disables the jetsam memory limit
 * for test jobs. Returns true if any defaults were applied.
 */
bool
job_apply_defaults(job_t j) {
	const char *test_prefix = "com.apple.test.";

	char *sb_prefix_end, *sb_suffix_start;
	// VLA sized by the full label; always at least as large as the
	// extracted middle portion plus its terminator.
	char true_job_label[strlen(j->label)];
	const char *label;

	if (((sb_prefix_end = strchr(j->label, ':')) != NULL) &&
		((sb_suffix_start = strchr(sb_prefix_end + 1, '[')) != NULL)) {
		/*
		 * Workaround 'UIKitApplication:com.apple.foo[bar]' convention for the processes
		 * we're interested in. To be removed when <rdar://problem/13066361> is addressed.
		 */
		// Copies the text between ':' and '['; the size argument counts
		// that span plus the NUL terminator, so snprintf truncates there.
		snprintf(true_job_label, sb_suffix_start - sb_prefix_end, "%s", sb_prefix_end + 1);
		label = true_job_label;
	} else {
		/* Just test the standard label */
		label = j->label;
	}

	/* Test for cache presence and apply if found */
	if (_launchd_defaults_cache) {
		xpc_object_t props = xpc_dictionary_get_value(_launchd_defaults_cache, label);
		if (props && xpc_get_type(props) == XPC_TYPE_DICTIONARY) {
			// Round-trip through launch_data so the ordinary key-import
			// machinery can consume the values.
			launch_data_t lv = xpc2ld(props);
			launch_data_dict_iterate(lv, job_import_keys, j);
			launch_data_free(lv);
			return true;
		}
	}

	/* Limit free? Disable the memory limit if this is a test job; see <rdar://problem/13180697> */
	if (!strncmp(label, test_prefix, strlen(test_prefix))) {
		j->jetsam_memlimit = -1;
		return true;
	}

	return false;
}
2352
2353
#endif
2354
2355
/* Import an array of job plists. Returns a launch_data array of errno
 * values, one per input job (0 for success). All jobs are imported
 * before any are dispatched so KeepAlive interdependencies resolve.
 */
launch_data_t
job_import_bulk(launch_data_t pload)
{
	launch_data_t resp = launch_data_alloc(LAUNCH_DATA_ARRAY);
	job_t *ja;
	size_t i, c = launch_data_array_get_count(pload);

	// Stack-allocated scratch list of the imported jobs.
	ja = alloca(c * sizeof(job_t));

	for (i = 0; i < c; i++) {
		// On success, clear errno so the recorded per-job result is 0 —
		// unless the import "succeeded" pending authorization
		// (ENEEDAUTH), which the caller still needs to see.
		if ((likely(ja[i] = jobmgr_import2(root_jobmgr, launch_data_array_get_index(pload, i)))) && errno != ENEEDAUTH) {
			errno = 0;
		}
		launch_data_array_set_index(resp, launch_data_new_errno(errno), i);
	}

	// Second pass: dispatch only after every job has been imported.
	for (i = 0; i < c; i++) {
		if (likely(ja[i])) {
			job_dispatch_curious_jobs(ja[i]);
			job_dispatch(ja[i], false);
		}
	}

	return resp;
}
2380
2381
void
2382
job_import_bool(job_t j, const char *key, bool value)
2383
{
2384
bool found_key = false;
2385
2386
switch (key[0]) {
2387
case 'a':
2388
case 'A':
2389
if (strcasecmp(key, LAUNCH_JOBKEY_ABANDONPROCESSGROUP) == 0) {
2390
j->abandon_pg = value;
2391
found_key = true;
2392
}
2393
break;
2394
case 'b':
2395
case 'B':
2396
if (strcasecmp(key, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN) == 0) {
2397
j->dirty_at_shutdown = value;
2398
found_key = true;
2399
}
2400
break;
2401
case 'j':
2402
case 'J':
2403
if (strcasecmp(key, LAUNCH_JOBKEY_JOINGUISESSION) == 0) {
2404
j->joins_gui_session = value;
2405
found_key = true;
2406
}
2407
break;
2408
case 'k':
2409
case 'K':
2410
if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
2411
j->ondemand = !value;
2412
found_key = true;
2413
}
2414
break;
2415
case 'o':
2416
case 'O':
2417
if (strcasecmp(key, LAUNCH_JOBKEY_ONDEMAND) == 0) {
2418
j->ondemand = value;
2419
found_key = true;
2420
}
2421
break;
2422
case 'd':
2423
case 'D':
2424
if (strcasecmp(key, LAUNCH_JOBKEY_DEBUG) == 0) {
2425
j->debug = value;
2426
found_key = true;
2427
} else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLED) == 0) {
2428
(void)job_assumes(j, !value);
2429
found_key = true;
2430
} else if (strcasecmp(key, LAUNCH_JOBKEY_DISABLEASLR) == 0) {
2431
j->disable_aslr = value;
2432
found_key = true;
2433
}
2434
break;
2435
case 'h':
2436
case 'H':
2437
if (strcasecmp(key, LAUNCH_JOBKEY_HOPEFULLYEXITSLAST) == 0) {
2438
job_log(j, LOG_PERF, "%s has been deprecated. Please use the new %s key instead and add EnableTransactions to your launchd.plist.", LAUNCH_JOBKEY_HOPEFULLYEXITSLAST, LAUNCH_JOBKEY_BEGINTRANSACTIONATSHUTDOWN);
2439
j->dirty_at_shutdown = value;
2440
found_key = true;
2441
}
2442
break;
2443
case 's':
2444
case 'S':
2445
if (strcasecmp(key, LAUNCH_JOBKEY_SESSIONCREATE) == 0) {
2446
j->session_create = value;
2447
found_key = true;
2448
} else if (strcasecmp(key, LAUNCH_JOBKEY_STARTONMOUNT) == 0) {
2449
j->start_on_mount = value;
2450
found_key = true;
2451
} else if (strcasecmp(key, LAUNCH_JOBKEY_SERVICEIPC) == 0) {
2452
// this only does something on Mac OS X 10.4 "Tiger"
2453
found_key = true;
2454
} else if (strcasecmp(key, LAUNCH_JOBKEY_SHUTDOWNMONITOR) == 0) {
2455
if (_launchd_shutdown_monitor) {
2456
job_log(j, LOG_ERR, "Only one job may monitor shutdown.");
2457
} else {
2458
j->shutdown_monitor = true;
2459
_launchd_shutdown_monitor = j;
2460
}
2461
found_key = true;
2462
}
2463
break;
2464
case 'l':
2465
case 'L':
2466
if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYIO) == 0) {
2467
j->low_pri_io = value;
2468
found_key = true;
2469
} else if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHONLYONCE) == 0) {
2470
j->only_once = value;
2471
found_key = true;
2472
} else if (strcasecmp(key, LAUNCH_JOBKEY_LOWPRIORITYBACKGROUNDIO) == 0) {
2473
j->low_priority_background_io = true;
2474
found_key = true;
2475
} else if (strcasecmp(key, LAUNCH_JOBKEY_LEGACYTIMERS) == 0) {
2476
#if !TARGET_OS_EMBEDDED
2477
j->legacy_timers = value;
2478
#else // !TARGET_OS_EMBEDDED
2479
job_log(j, LOG_ERR, "This key is not supported on this platform: %s", key);
2480
#endif // !TARGET_OS_EMBEDDED
2481
found_key = true;
2482
}
2483
break;
2484
case 'm':
2485
case 'M':
2486
if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
2487
j->internal_exc_handler = value;
2488
found_key = true;
2489
} else if (strcasecmp(key, LAUNCH_JOBKEY_MULTIPLEINSTANCES) == 0) {
2490
j->multiple_instances = value;
2491
found_key = true;
2492
}
2493
break;
2494
case 'i':
2495
case 'I':
2496
if (strcasecmp(key, LAUNCH_JOBKEY_INITGROUPS) == 0) {
2497
if (getuid() != 0) {
2498
job_log(j, LOG_WARNING, "Ignored this key: %s", key);
2499
return;
2500
}
2501
j->no_init_groups = !value;
2502
found_key = true;
2503
} else if (strcasecmp(key, LAUNCH_JOBKEY_IGNOREPROCESSGROUPATSHUTDOWN) == 0) {
2504
j->ignore_pg_at_shutdown = value;
2505
found_key = true;
2506
}
2507
break;
2508
case 'r':
2509
case 'R':
2510
if (strcasecmp(key, LAUNCH_JOBKEY_RUNATLOAD) == 0) {
2511
if (value) {
2512
// We don't want value == false to change j->start_pending
2513
j->start_pending = true;
2514
}
2515
found_key = true;
2516
}
2517
break;
2518
case 'e':
2519
case 'E':
2520
if (strcasecmp(key, LAUNCH_JOBKEY_ENABLEGLOBBING) == 0) {
2521
j->globargv = value;
2522
found_key = true;
2523
} else if (strcasecmp(key, LAUNCH_JOBKEY_ENABLETRANSACTIONS) == 0) {
2524
j->enable_transactions = value;
2525
found_key = true;
2526
} else if (strcasecmp(key, LAUNCH_JOBKEY_ENTERKERNELDEBUGGERBEFOREKILL) == 0) {
2527
j->debug_before_kill = value;
2528
found_key = true;
2529
} else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDPRIVILEGEDISPENSATION) == 0) {
2530
#if TARGET_OS_EMBEDDED
2531
if (!_launchd_embedded_god) {
2532
if ((j->embedded_god = value)) {
2533
_launchd_embedded_god = j;
2534
}
2535
} else {
2536
job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
2537
}
2538
#else
2539
job_log(j, LOG_ERR, "This key is not supported on this platform: %s", key);
2540
#endif
2541
found_key = true;
2542
} else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDHOMESCREEN) == 0) {
2543
#if TARGET_OS_EMBEDDED
2544
if (!_launchd_embedded_home) {
2545
if ((j->embedded_home = value)) {
2546
_launchd_embedded_home = j;
2547
}
2548
} else {
2549
job_log(j, LOG_ERR, "Job tried to claim %s after it has already been claimed.", key);
2550
}
2551
#else
2552
job_log(j, LOG_ERR, "This key is not supported on this platform: %s", key);
2553
#endif
2554
} else if (strcasecmp(key, LAUNCH_JOBKEY_EVENTMONITOR) == 0) {
2555
if (!_launchd_event_monitor) {
2556
j->event_monitor = value;
2557
if (value) {
2558
_launchd_event_monitor = j;
2559
}
2560
} else {
2561
job_log(j, LOG_NOTICE, "Job tried to steal event monitoring responsibility from: %s", _launchd_event_monitor->label);
2562
}
2563
found_key = true;
2564
}
2565
break;
2566
case 'w':
2567
case 'W':
2568
if (strcasecmp(key, LAUNCH_JOBKEY_WAITFORDEBUGGER) == 0) {
2569
j->wait4debugger = value;
2570
found_key = true;
2571
}
2572
break;
2573
case 'x':
2574
case 'X':
2575
if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAINBOOTSTRAPPER) == 0) {
2576
if (pid1_magic) {
2577
if (_launchd_xpc_bootstrapper) {
2578
job_log(j, LOG_ERR, "This job tried to steal the XPC domain bootstrapper property from the following job: %s", _launchd_xpc_bootstrapper->label);
2579
} else {
2580
_launchd_xpc_bootstrapper = j;
2581
j->xpc_bootstrapper = value;
2582
}
2583
} else {
2584
job_log(j, LOG_ERR, "Non-daemon tried to claim XPC bootstrapper property.");
2585
}
2586
}
2587
found_key = true;
2588
break;
2589
default:
2590
break;
2591
}
2592
2593
if (unlikely(!found_key)) {
2594
job_log(j, LOG_WARNING, "Unknown key for boolean: %s", key);
2595
}
2596
}
2597
2598
/* Import a string-valued plist key into job `j`. Most keys simply store
 * a strdup'd copy into the matching job field via `where2put`; a few
 * (Program, Label, LimitLoadTo*, XPCDomain) are handled elsewhere and
 * ignored here. StandardInPath additionally opens the file immediately.
 */
void
job_import_string(job_t j, const char *key, const char *value)
{
	// Destination field for the strdup'd value; NULL means the key was
	// either handled in-line, handled elsewhere, or is unknown.
	char **where2put = NULL;

	switch (key[0]) {
	case 'c':
	case 'C':
		if (strcasecmp(key, LAUNCH_JOBKEY_CFBUNDLEIDENTIFIER) == 0) {
			where2put = &j->cfbundleidentifier;
		}
		break;
	case 'm':
	case 'M':
		if (strcasecmp(key, LAUNCH_JOBKEY_MACHEXCEPTIONHANDLER) == 0) {
			where2put = &j->alt_exc_handler;
		}
		break;
	case 'p':
	case 'P':
		if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAM) == 0) {
			// Program is consumed during job creation, not here.
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_POSIXSPAWNTYPE) == 0
			|| strcasecmp(key, LAUNCH_JOBKEY_PROCESSTYPE) == 0) {
			// Map the plist's process-type name onto posix_spawn's enum.
			if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_INTERACTIVE) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_ADAPTIVE) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_STANDARD) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_BACKGROUND) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_TALAPP) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_TAL;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_SYSTEMAPP) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
				j->system_app = true;
			} else if (strcasecmp(value, LAUNCH_KEY_POSIXSPAWNTYPE_APP) == 0) {
				j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
				j->app = true;
			} else {
				job_log(j, LOG_ERR, "Unknown value for key %s: %s", key, value);
			}
			return;
		}
		break;
	case 'l':
	case 'L':
		// Label and LimitLoad* are processed by launchctl / job creation.
		if (strcasecmp(key, LAUNCH_JOBKEY_LABEL) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
			return;
		}
		break;
	case 'r':
	case 'R':
		if (strcasecmp(key, LAUNCH_JOBKEY_ROOTDIRECTORY) == 0) {
			// Only root may chroot.
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			}
			where2put = &j->rootdir;
		}
		break;
	case 'w':
	case 'W':
		if (strcasecmp(key, LAUNCH_JOBKEY_WORKINGDIRECTORY) == 0) {
			where2put = &j->workingdir;
		}
		break;
	case 'u':
	case 'U':
		if (strcasecmp(key, LAUNCH_JOBKEY_USERNAME) == 0) {
			// Only root may switch users; "root" itself is a no-op.
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "root") == 0) {
				return;
			}
			where2put = &j->username;
		}
		break;
	case 'g':
	case 'G':
		if (strcasecmp(key, LAUNCH_JOBKEY_GROUPNAME) == 0) {
			// Only root may switch groups; "wheel" itself is a no-op.
			if (getuid() != 0) {
				job_log(j, LOG_WARNING, "Ignored this key: %s", key);
				return;
			} else if (strcmp(value, "wheel") == 0) {
				return;
			}
			where2put = &j->groupname;
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDOUTPATH) == 0) {
			where2put = &j->stdoutpath;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDERRORPATH) == 0) {
			where2put = &j->stderrpath;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_STANDARDINPATH) == 0) {
			where2put = &j->stdinpath;
			// Opened now (non-blocking so open() can't stall launchd);
			// created if absent.
			j->stdin_fd = _fd(open(value, O_RDONLY|O_CREAT|O_NOCTTY|O_NONBLOCK, DEFFILEMODE));
			if (job_assumes_zero_p(j, j->stdin_fd) != -1) {
				// open() should not block, but regular IO by the job should
				(void)job_assumes_zero_p(j, fcntl(j->stdin_fd, F_SETFL, 0));
				// XXX -- EV_CLEAR should make named pipes happy?
				(void)job_assumes_zero_p(j, kevent_mod(j->stdin_fd, EVFILT_READ, EV_ADD|EV_CLEAR, 0, 0, j));
			} else {
				// NOTE(review): resets to 0 rather than -1 on failure —
				// presumably "no fd" is represented as 0 here; confirm
				// against the consumers of stdin_fd.
				j->stdin_fd = 0;
			}
#if HAVE_SANDBOX
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXPROFILE) == 0) {
			where2put = &j->seatbelt_profile;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXCONTAINER) == 0) {
			where2put = &j->container_identifier;
#endif
		}
		break;
	case 'X':
	case 'x':
		if (strcasecmp(key, LAUNCH_JOBKEY_XPCDOMAIN) == 0) {
			return;
		}
		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for string: %s", key);
		break;
	}

	if (likely(where2put)) {
		if (!(*where2put = strdup(value))) {
			(void)job_assumes_zero(j, errno);
		}
	} else {
		// See rdar://problem/5496612. These two are okay.
		if (strncmp(key, "SHAuthorizationRight", sizeof("SHAuthorizationRight")) == 0
			|| strncmp(key, "ServiceDescription", sizeof("ServiceDescription")) == 0) {
			job_log(j, LOG_APPLEONLY, "This key is no longer relevant and should be removed: %s", key);
		} else {
			job_log(j, LOG_WARNING, "Unknown key: %s", key);
		}
	}
}
2746
2747
void
2748
job_import_integer(job_t j, const char *key, long long value)
2749
{
2750
switch (key[0]) {
2751
case 'a':
2752
case 'A':
2753
#if TARGET_OS_EMBEDDED
2754
if (strcasecmp(key, LAUNCH_JOBKEY_ASID) == 0) {
2755
if (launchd_embedded_handofgod) {
2756
if (audit_session_port((au_asid_t)value, &j->asport) == -1 && errno != ENOSYS) {
2757
(void)job_assumes_zero(j, errno);
2758
}
2759
}
2760
}
2761
#endif
2762
case 'e':
2763
case 'E':
2764
if (strcasecmp(key, LAUNCH_JOBKEY_EXITTIMEOUT) == 0) {
2765
if (unlikely(value < 0)) {
2766
job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2767
} else if (unlikely(value > UINT32_MAX)) {
2768
job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_EXITTIMEOUT);
2769
} else {
2770
j->exit_timeout = (typeof(j->exit_timeout)) value;
2771
}
2772
} else if (strcasecmp(key, LAUNCH_JOBKEY_EMBEDDEDMAINTHREADPRIORITY) == 0) {
2773
j->main_thread_priority = value;
2774
}
2775
break;
2776
case 'j':
2777
case 'J':
2778
if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0) {
2779
job_log(j, LOG_WARNING | LOG_CONSOLE, "Please change the JetsamPriority key to be in a dictionary named JetsamProperties.");
2780
2781
launch_data_t pri = launch_data_new_integer(value);
2782
if (job_assumes(j, pri != NULL)) {
2783
jetsam_property_setup(pri, LAUNCH_JOBKEY_JETSAMPRIORITY, j);
2784
launch_data_free(pri);
2785
}
2786
}
2787
case 'n':
2788
case 'N':
2789
if (strcasecmp(key, LAUNCH_JOBKEY_NICE) == 0) {
2790
if (unlikely(value < PRIO_MIN)) {
2791
job_log(j, LOG_WARNING, "%s less than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MIN);
2792
} else if (unlikely(value > PRIO_MAX)) {
2793
job_log(j, LOG_WARNING, "%s is greater than %d. Ignoring.", LAUNCH_JOBKEY_NICE, PRIO_MAX);
2794
} else {
2795
j->nice = (typeof(j->nice)) value;
2796
j->setnice = true;
2797
}
2798
}
2799
break;
2800
case 't':
2801
case 'T':
2802
if (strcasecmp(key, LAUNCH_JOBKEY_TIMEOUT) == 0) {
2803
if (unlikely(value < 0)) {
2804
job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2805
} else if (unlikely(value > UINT32_MAX)) {
2806
job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_TIMEOUT);
2807
} else {
2808
j->timeout = (typeof(j->timeout)) value;
2809
}
2810
} else if (strcasecmp(key, LAUNCH_JOBKEY_THROTTLEINTERVAL) == 0) {
2811
if (value < 0) {
2812
job_log(j, LOG_WARNING, "%s less than zero. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2813
} else if (value > UINT32_MAX) {
2814
job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_THROTTLEINTERVAL);
2815
} else {
2816
j->min_run_time = (typeof(j->min_run_time)) value;
2817
}
2818
}
2819
break;
2820
case 'u':
2821
case 'U':
2822
if (strcasecmp(key, LAUNCH_JOBKEY_UMASK) == 0) {
2823
j->mask = value;
2824
j->setmask = true;
2825
}
2826
break;
2827
case 's':
2828
case 'S':
2829
if (strcasecmp(key, LAUNCH_JOBKEY_STARTINTERVAL) == 0) {
2830
if (unlikely(value <= 0)) {
2831
job_log(j, LOG_WARNING, "%s is not greater than zero. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2832
} else if (unlikely(value > UINT32_MAX)) {
2833
job_log(j, LOG_WARNING, "%s is too large. Ignoring.", LAUNCH_JOBKEY_STARTINTERVAL);
2834
} else {
2835
runtime_add_weak_ref();
2836
j->start_interval = (typeof(j->start_interval)) value;
2837
2838
(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j));
2839
}
2840
#if HAVE_SANDBOX
2841
} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2842
j->seatbelt_flags = value;
2843
#endif
2844
}
2845
2846
break;
2847
default:
2848
job_log(j, LOG_WARNING, "Unknown key for integer: %s", key);
2849
break;
2850
}
2851
}
2852
2853
void
2854
job_import_opaque(job_t j __attribute__((unused)), const char *key, launch_data_t value __attribute__((unused)))
2855
{
2856
switch (key[0]) {
2857
case 'q':
2858
case 'Q':
2859
#if HAVE_QUARANTINE
2860
if (strcasecmp(key, LAUNCH_JOBKEY_QUARANTINEDATA) == 0) {
2861
size_t tmpsz = launch_data_get_opaque_size(value);
2862
2863
if (job_assumes(j, j->quarantine_data = malloc(tmpsz))) {
2864
memcpy(j->quarantine_data, launch_data_get_opaque(value), tmpsz);
2865
j->quarantine_data_sz = tmpsz;
2866
}
2867
}
2868
#endif
2869
case 's':
2870
case 'S':
2871
if (strcasecmp(key, LAUNCH_JOBKEY_SECURITYSESSIONUUID) == 0) {
2872
size_t tmpsz = launch_data_get_opaque_size(value);
2873
if (job_assumes(j, tmpsz == sizeof(uuid_t))) {
2874
memcpy(j->expected_audit_uuid, launch_data_get_opaque(value), sizeof(uuid_t));
2875
}
2876
}
2877
break;
2878
default:
2879
break;
2880
}
2881
}
2882
2883
static void
2884
policy_setup(launch_data_t obj, const char *key, void *context)
2885
{
2886
job_t j = context;
2887
bool found_key = false;
2888
2889
switch (key[0]) {
2890
case 'd':
2891
case 'D':
2892
if (strcasecmp(key, LAUNCH_JOBPOLICY_DENYCREATINGOTHERJOBS) == 0) {
2893
j->deny_job_creation = launch_data_get_bool(obj);
2894
found_key = true;
2895
}
2896
break;
2897
default:
2898
break;
2899
}
2900
2901
if (unlikely(!found_key)) {
2902
job_log(j, LOG_WARNING, "Unknown policy: %s", key);
2903
}
2904
}
2905
2906
void
2907
job_import_dictionary(job_t j, const char *key, launch_data_t value)
2908
{
2909
launch_data_t tmp;
2910
2911
switch (key[0]) {
2912
case 'p':
2913
case 'P':
2914
if (strcasecmp(key, LAUNCH_JOBKEY_POLICIES) == 0) {
2915
launch_data_dict_iterate(value, policy_setup, j);
2916
}
2917
break;
2918
case 'k':
2919
case 'K':
2920
if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE) == 0) {
2921
launch_data_dict_iterate(value, semaphoreitem_setup, j);
2922
}
2923
break;
2924
case 'i':
2925
case 'I':
2926
if (strcasecmp(key, LAUNCH_JOBKEY_INETDCOMPATIBILITY) == 0) {
2927
j->inetcompat = true;
2928
j->abandon_pg = true;
2929
if ((tmp = launch_data_dict_lookup(value, LAUNCH_JOBINETDCOMPATIBILITY_WAIT))) {
2930
j->inetcompat_wait = launch_data_get_bool(tmp);
2931
}
2932
}
2933
break;
2934
case 'j':
2935
case 'J':
2936
if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPROPERTIES) == 0) {
2937
launch_data_dict_iterate(value, (void (*)(launch_data_t, const char *, void *))jetsam_property_setup, j);
2938
}
2939
case 'e':
2940
case 'E':
2941
if (strcasecmp(key, LAUNCH_JOBKEY_ENVIRONMENTVARIABLES) == 0) {
2942
launch_data_dict_iterate(value, envitem_setup, j);
2943
}
2944
break;
2945
case 'u':
2946
case 'U':
2947
if (strcasecmp(key, LAUNCH_JOBKEY_USERENVIRONMENTVARIABLES) == 0) {
2948
j->importing_global_env = true;
2949
launch_data_dict_iterate(value, envitem_setup, j);
2950
j->importing_global_env = false;
2951
}
2952
break;
2953
case 's':
2954
case 'S':
2955
if (strcasecmp(key, LAUNCH_JOBKEY_SOCKETS) == 0) {
2956
launch_data_dict_iterate(value, socketgroup_setup, j);
2957
} else if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
2958
calendarinterval_new_from_obj(j, value);
2959
} else if (strcasecmp(key, LAUNCH_JOBKEY_SOFTRESOURCELIMITS) == 0) {
2960
launch_data_dict_iterate(value, limititem_setup, j);
2961
#if HAVE_SANDBOX
2962
} else if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOXFLAGS) == 0) {
2963
launch_data_dict_iterate(value, seatbelt_setup_flags, j);
2964
#endif
2965
}
2966
break;
2967
case 'h':
2968
case 'H':
2969
if (strcasecmp(key, LAUNCH_JOBKEY_HARDRESOURCELIMITS) == 0) {
2970
j->importing_hard_limits = true;
2971
launch_data_dict_iterate(value, limititem_setup, j);
2972
j->importing_hard_limits = false;
2973
}
2974
break;
2975
case 'm':
2976
case 'M':
2977
if (strcasecmp(key, LAUNCH_JOBKEY_MACHSERVICES) == 0) {
2978
launch_data_dict_iterate(value, machservice_setup, j);
2979
}
2980
break;
2981
case 'l':
2982
case 'L':
2983
if (strcasecmp(key, LAUNCH_JOBKEY_LAUNCHEVENTS) == 0) {
2984
launch_data_dict_iterate(value, eventsystem_setup, j);
2985
} else {
2986
if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHARDWARE) == 0) {
2987
return;
2988
}
2989
if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHARDWARE) == 0) {
2990
return;
2991
}
2992
}
2993
break;
2994
default:
2995
job_log(j, LOG_WARNING, "Unknown key for dictionary: %s", key);
2996
break;
2997
}
2998
}
2999
3000
/* Import one array-valued key from a job's property list. Several keys are
 * recognized but intentionally ignored here because launchctl or other import
 * paths handle them (ProgramArguments, LimitLoadTo/FromHosts).
 */
void
job_import_array(job_t j, const char *key, launch_data_t value)
{
	size_t i, value_cnt = launch_data_array_get_count(value);

	switch (key[0]) {
	case 'p':
	case 'P':
		if (strcasecmp(key, LAUNCH_JOBKEY_PROGRAMARGUMENTS) == 0) {
			// Parsed separately in jobmgr_import2(); nothing to do here.
			return;
		}
		break;
	case 'l':
	case 'L':
		if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADFROMHOSTS) == 0) {
			return;
		} else if (strcasecmp(key, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE) == 0) {
			// launchctl is expected to collapse this array into a string first.
			job_log(j, LOG_NOTICE, "launchctl should have transformed the \"%s\" array to a string", LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
			return;
		}
		break;
	case 'b':
	case 'B':
		if (strcasecmp(key, LAUNCH_JOBKEY_BINARYORDERPREFERENCE) == 0) {
			// Copy the preferred CPU-architecture order into j->j_binpref.
			// job_assumes() logs (but tolerates) a malloc failure.
			if (job_assumes(j, j->j_binpref = malloc(value_cnt * sizeof(*j->j_binpref)))) {
				j->j_binpref_cnt = value_cnt;
				for (i = 0; i < value_cnt; i++) {
					j->j_binpref[i] = (cpu_type_t) launch_data_get_integer(launch_data_array_get_index(value, i));
				}
			}
		}
		break;
	case 's':
	case 'S':
		if (strcasecmp(key, LAUNCH_JOBKEY_STARTCALENDARINTERVAL) == 0) {
			// One calendar-interval record per array element.
			for (i = 0; i < value_cnt; i++) {
				calendarinterval_new_from_obj(j, launch_data_array_get_index(value, i));
			}
		}
		break;
	default:
		job_log(j, LOG_WARNING, "Unknown key for array: %s", key);
		break;
	}
}
3047
3048
void
3049
job_import_keys(launch_data_t obj, const char *key, void *context)
3050
{
3051
job_t j = context;
3052
launch_data_type_t kind;
3053
3054
if (!obj) {
3055
launchd_syslog(LOG_ERR, "NULL object given to job_import_keys().");
3056
return;
3057
}
3058
3059
kind = launch_data_get_type(obj);
3060
3061
switch (kind) {
3062
case LAUNCH_DATA_BOOL:
3063
job_import_bool(j, key, launch_data_get_bool(obj));
3064
break;
3065
case LAUNCH_DATA_STRING:
3066
job_import_string(j, key, launch_data_get_string(obj));
3067
break;
3068
case LAUNCH_DATA_INTEGER:
3069
job_import_integer(j, key, launch_data_get_integer(obj));
3070
break;
3071
case LAUNCH_DATA_DICTIONARY:
3072
job_import_dictionary(j, key, obj);
3073
break;
3074
case LAUNCH_DATA_ARRAY:
3075
job_import_array(j, key, obj);
3076
break;
3077
case LAUNCH_DATA_OPAQUE:
3078
job_import_opaque(j, key, obj);
3079
break;
3080
default:
3081
job_log(j, LOG_WARNING, "Unknown value type '%d' for key: %s", kind, key);
3082
break;
3083
}
3084
}
3085
3086
/* Import a job plist (a LAUNCH_DATA_DICTIONARY) into job manager `jm`.
 *
 * Validates the payload (Label required; Program and/or ProgramArguments
 * required), optionally redirects into the session named by
 * LimitLoadToSessionType, rejects duplicate/invalid labels, then creates the
 * job and iterates the remaining keys through job_import_keys().
 *
 * Returns the new job, or NULL with errno set (EINVAL/EPERM/EEXIST). On
 * EEXIST in an XPC-singleton domain the EXISTING job is returned instead of
 * NULL, so callers must check errno as well as the return value.
 */
job_t
jobmgr_import2(jobmgr_t jm, launch_data_t pload)
{
	launch_data_t tmp, ldpa;
	const char *label = NULL, *prog = NULL;
	const char **argv = NULL;
	job_t j;

	if (!jobmgr_assumes(jm, pload != NULL)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY)) {
		errno = EINVAL;
		return NULL;
	}

	// The Label key is mandatory and must be a non-NULL string.
	if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL)))) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(launch_data_get_type(tmp) != LAUNCH_DATA_STRING)) {
		errno = EINVAL;
		return NULL;
	}

	if (unlikely(!(label = launch_data_get_string(tmp)))) {
		errno = EINVAL;
		return NULL;
	}

#if TARGET_OS_EMBEDDED
	/* Under "hand of god" on embedded, only the anointed user may import
	 * jobs, and only for its own UserName.
	 */
	if (unlikely(launchd_embedded_handofgod && _launchd_embedded_god)) {
		if (unlikely(!(tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_USERNAME)))) {
			errno = EPERM;
			return NULL;
		}

		const char *username = NULL;
		if (likely(tmp && launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
			username = launch_data_get_string(tmp);
		} else {
			errno = EPERM;
			return NULL;
		}

		if (!jobmgr_assumes(jm, _launchd_embedded_god->username != NULL && username != NULL)) {
			errno = EPERM;
			return NULL;
		}

		if (unlikely(strcmp(_launchd_embedded_god->username, username) != 0)) {
			errno = EPERM;
			return NULL;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return NULL;
	}
#endif

	if ((tmp = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAM))
		&& (launch_data_get_type(tmp) == LAUNCH_DATA_STRING)) {
		prog = launch_data_get_string(tmp);
	}

	int argc = 0;
	if ((ldpa = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_PROGRAMARGUMENTS))) {
		size_t i, c;

		if (launch_data_get_type(ldpa) != LAUNCH_DATA_ARRAY) {
			errno = EINVAL;
			return NULL;
		}

		c = launch_data_array_get_count(ldpa);

		// Stack-allocated argv; valid for the rest of this function only.
		// job_new() is expected to copy what it needs.
		argv = alloca((c + 1) * sizeof(char *));

		for (i = 0; i < c; i++) {
			tmp = launch_data_array_get_index(ldpa, i);

			if (launch_data_get_type(tmp) != LAUNCH_DATA_STRING) {
				errno = EINVAL;
				return NULL;
			}

			argv[i] = launch_data_get_string(tmp);
		}

		argv[i] = NULL;
		argc = i;
	}

	if (!prog && argc == 0) {
		jobmgr_log(jm, LOG_ERR, "Job specifies neither Program nor ProgramArguments: %s", label);
		errno = EINVAL;
		return NULL;
	}

	/* Find the requested session. You cannot load services into XPC domains in
	 * this manner.
	 */
	launch_data_t session = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LIMITLOADTOSESSIONTYPE);
	if (session) {
		jobmgr_t jmt = NULL;
		if (launch_data_get_type(session) == LAUNCH_DATA_STRING) {
			jmt = jobmgr_find_by_name(jm, launch_data_get_string(session));
			if (!jmt) {
				jobmgr_log(jm, LOG_ERR, "Could not find requested session: %s", launch_data_get_string(session));
			} else {
				jm = jmt;
			}
		} else {
			jobmgr_log(jm, LOG_ERR, "Session type is not a string.");
		}

		if (!jmt) {
			errno = EINVAL;
			return NULL;
		}
	}

	/* For legacy reasons, we have a global hash of all labels in all job
	 * managers. So rather than make it a global, we store it in the root job
	 * manager. But for an XPC domain, we store a local hash of all services in
	 * the domain.
	 */
	jobmgr_t where2look = (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) ? jm : root_jobmgr;
	if (unlikely((j = job_find(where2look, label)) != NULL)) {
		if (jm->xpc_singleton) {
			/* There can (and probably will be) multiple attemtps to import the
			 * same XPC service from the same framework. This is okay. It's
			 * treated as a singleton, so just return the existing one so that
			 * it may be aliased into the requesting process' XPC domain.
			 */
			errno = EEXIST;
			return j;
		} else {
			/* If we're not a global XPC domain, then it's an error to try
			 * importing the same job/service multiple times.
			 */
			errno = EEXIST;
			return NULL;
		}
	} else if (unlikely(!jobmgr_label_test(where2look, label))) {
		errno = EINVAL;
		return NULL;
	}
	jobmgr_log(jm, LOG_DEBUG, "Importing %s.", label);

	if (likely(j = job_new(jm, label, prog, argv))) {
#if TARGET_OS_EMBEDDED
		job_apply_defaults(j);
#endif
		launch_data_dict_iterate(pload, job_import_keys, j);
		if (!uuid_is_null(j->expected_audit_uuid)) {
			// Job must wait for its audit session before it may be dispatched.
			uuid_string_t uuid_str;
			uuid_unparse(j->expected_audit_uuid, uuid_str);
			job_log(j, LOG_DEBUG, "Imported job. Waiting for session for UUID %s.", uuid_str);
			LIST_INSERT_HEAD(&s_needing_sessions, j, needing_session_sle);
			errno = ENEEDAUTH;
		} else {
			job_log(j, LOG_DEBUG, "No security session specified.");
			j->asport = MACH_PORT_NULL;
		}

		if (pid1_magic && !jm->parentmgr) {
			/* Workaround reentrancy in CF. We don't make this a global variable
			 * because we don't want per-user launchd's to inherit it. So we
			 * just set it for every job that we import into the System session.
			 *
			 * See <rdar://problem/9468837>.
			 */
			envitem_new(j, "__CF_USER_TEXT_ENCODING", "0x0:0:0", false);
		}

		if (j->event_monitor) {
			eventsystem_ping();
		}

#if TARGET_OS_EMBEDDED
		/* SpringBoard and backboardd must run at elevated priority.
		 *
		 * See <rdar://problem/9539873> and <rdar://problem/10984383>.
		 */
		if (j->embedded_god || j->embedded_home) {
			j->psproctype = POSIX_SPAWN_PROC_TYPE_APP_DEFAULT;
		}
#endif
	}

	return j;
}
3282
3283
bool
3284
jobmgr_label_test(jobmgr_t jm, const char *str)
3285
{
3286
const char *ptr;
3287
3288
if (str[0] == '\0') {
3289
jobmgr_log(jm, LOG_ERR, "Empty job labels are not allowed");
3290
return false;
3291
}
3292
3293
for (ptr = str; *ptr; ptr++) {
3294
if (iscntrl(*ptr)) {
3295
jobmgr_log(jm, LOG_ERR, "ASCII control characters are not allowed in job labels. Index: %td Value: 0x%hhx", ptr - str, *ptr);
3296
return false;
3297
}
3298
}
3299
3300
if ((strncasecmp(str, "com.apple.launchd", strlen("com.apple.launchd")) == 0)
3301
|| (strncasecmp(str, "com.apple.launchctl", strlen("com.apple.launchctl")) == 0)) {
3302
jobmgr_log(jm, LOG_ERR, "Job labels are not allowed to use a reserved prefix: %s", str);
3303
return false;
3304
}
3305
3306
return true;
3307
}
3308
3309
/* Look up a job by exact label in `jm`'s label hash (root_jobmgr if jm is
 * NULL). Jobs that are pending removal, or whose manager is shutting down,
 * are skipped. Returns NULL with errno = ESRCH if not found.
 */
job_t
job_find(jobmgr_t jm, const char *label)
{
	job_t ji;

	if (!jm) {
		jm = root_jobmgr;
	}

	LIST_FOREACH(ji, &jm->label_hash[hash_label(label)], label_hash_sle) {
		if (unlikely(ji->removal_pending || ji->mgr->shutting_down)) {
			// 5351245 and 5488633 respectively
			continue;
		}

		if (strcmp(ji->label, label) == 0) {
			return ji;
		}
	}

	errno = ESRCH;
	return NULL;
}
3332
3333
// Should try and consolidate with job_mig_intran2() and jobmgr_find_by_pid().
3334
job_t
3335
jobmgr_find_by_pid_deep(jobmgr_t jm, pid_t p, bool anon_okay)
3336
{
3337
job_t ji = NULL;
3338
LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3339
if (ji->p == p && (!ji->anonymous || (ji->anonymous && anon_okay))) {
3340
return ji;
3341
}
3342
}
3343
3344
jobmgr_t jmi = NULL;
3345
SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3346
if ((ji = jobmgr_find_by_pid_deep(jmi, p, anon_okay))) {
3347
break;
3348
}
3349
}
3350
3351
return ji;
3352
}
3353
3354
job_t
3355
jobmgr_find_by_pid(jobmgr_t jm, pid_t p, bool create_anon)
3356
{
3357
job_t ji;
3358
3359
LIST_FOREACH(ji, &jm->active_jobs[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3360
if (ji->p == p) {
3361
return ji;
3362
}
3363
}
3364
3365
return create_anon ? job_new_anonymous(jm, p) : NULL;
3366
}
3367
3368
job_t
3369
managed_job(pid_t p)
3370
{
3371
job_t ji;
3372
3373
LIST_FOREACH(ji, &managed_actives[ACTIVE_JOB_HASH(p)], pid_hash_sle) {
3374
if (ji->p == p) {
3375
return ji;
3376
}
3377
}
3378
3379
return NULL;
3380
}
3381
3382
/* MIG intran helper: resolve a Mach port + caller PID to a job.
 *
 * If `mport` is the manager's own bootstrap port, the caller is identified by
 * PID (creating an anonymous job if needed). Otherwise recurse into
 * submanagers, then check per-job ports. Returns NULL if nothing matches.
 */
job_t
job_mig_intran2(jobmgr_t jm, mach_port_t mport, pid_t upid)
{
	jobmgr_t jmi;
	job_t ji;

	if (jm->jm_port == mport) {
		return jobmgr_find_by_pid(jm, upid, true);
	}

	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
		job_t jr;

		if ((jr = job_mig_intran2(jmi, mport, upid))) {
			return jr;
		}
	}

	LIST_FOREACH(ji, &jm->jobs, sle) {
		if (ji->j_port == mport) {
			return ji;
		}
	}

	return NULL;
}
3408
3409
/* MIG intran entry point: map the request port `p` plus the caller's audited
 * credentials to a job, starting at the root manager. On failure, logs
 * whether the calling process has since disappeared.
 */
job_t
job_mig_intran(mach_port_t p)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jr;

	jr = job_mig_intran2(root_jobmgr, p, ldc->pid);

	if (!jr) {
		struct proc_bsdshortinfo proc;
		// proc_pidinfo() returning 0 means the lookup itself failed.
		if (proc_pidinfo(ldc->pid, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)jobmgr_assumes_zero(root_jobmgr, errno);
			} else {
				// ESRCH: caller exited between sending the message and now.
				jobmgr_log(root_jobmgr, LOG_ERR, "%s[%i] disappeared out from under us (UID: %u EUID: %u)", proc.pbsi_comm, ldc->pid, ldc->uid, ldc->euid);
			}
		}
	}

	return jr;
}
3430
3431
job_t
3432
job_find_by_service_port(mach_port_t p)
3433
{
3434
struct machservice *ms;
3435
3436
LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
3437
if (ms->recv && (ms->port == p)) {
3438
return ms->job;
3439
}
3440
}
3441
3442
return NULL;
3443
}
3444
3445
/* MIG destructor: run after a MIG routine returns. Removes a job that was
 * flagged for unload-at-return, then clears the 5477111 workaround marker and
 * sanity-checks calendar intervals.
 */
void
job_mig_destructor(job_t j)
{
	/* The job can go invalid before this point.
	 *
	 * <rdar://problem/5477111>
	 */
	if (unlikely(j && (j != workaround_5477111) && j->unload_at_mig_return)) {
		job_log(j, LOG_NOTICE, "Unloading PID %u at MIG return.", j->p);
		job_remove(j);
	}

	workaround_5477111 = NULL;

	calendarinterval_sanity_check();
}
3461
3462
void
3463
job_export_all2(jobmgr_t jm, launch_data_t where)
3464
{
3465
jobmgr_t jmi;
3466
job_t ji;
3467
3468
SLIST_FOREACH(jmi, &jm->submgrs, sle) {
3469
job_export_all2(jmi, where);
3470
}
3471
3472
LIST_FOREACH(ji, &jm->jobs, sle) {
3473
launch_data_t tmp;
3474
3475
if (jobmgr_assumes(jm, (tmp = job_export(ji)) != NULL)) {
3476
launch_data_dict_insert(where, tmp, ji->label);
3477
}
3478
}
3479
}
3480
3481
launch_data_t
3482
job_export_all(void)
3483
{
3484
launch_data_t resp = launch_data_alloc(LAUNCH_DATA_DICTIONARY);
3485
3486
if (resp != NULL) {
3487
job_export_all2(root_jobmgr, resp);
3488
} else {
3489
(void)os_assumes_zero(errno);
3490
}
3491
3492
return resp;
3493
}
3494
3495
/* Diagnostic (Apple-internal builds only): after a job dies, log every other
 * live process still sharing its process group — these are "strays" the job
 * abandoned. PIDs 0, 1, and the job itself are skipped.
 */
void
job_log_stray_pg(job_t j)
{
	pid_t *pids = NULL;
	// Buffer sized for the kernel's max process count.
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	int i = 0, kp_cnt = 0;

	if (!launchd_apple_internal) {
		return;
	}

	runtime_ktrace(RTKT_LAUNCHD_FINDING_STRAY_PG, j->p, 0, 0);

	if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
		return;
	}
	if (job_assumes_zero_p(j, (kp_cnt = proc_listpgrppids(j->p, pids, len))) == -1) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		pid_t p_i = pids[i];
		if (p_i == j->p) {
			continue;
		} else if (p_i == 0 || p_i == 1) {
			continue;
		}

		struct proc_bsdshortinfo proc;
		// 0 return means proc_pidinfo() failed; ESRCH = process already gone.
		if (proc_pidinfo(p_i, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			continue;
		}

		pid_t pp_i = proc.pbsi_ppid;
		const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
		const char *n = proc.pbsi_comm;

		job_log(j, LOG_WARNING, "Stray %sprocess with PGID equal to this dead job: PID %u PPID %u PGID %u %s", z, p_i, pp_i, proc.pbsi_pgid, n);
	}

out:
	free(pids);
}
3541
3542
#if HAVE_SYSTEMSTATS
3543
// Periodic timer callback: log performance statistics for the whole job tree.
static void
systemstats_timer_callback(void)
{
	jobmgr_log_perf_statistics(root_jobmgr, true);
}
3548
3549
/* Lazily initialize the systemstats writer and (in PID-1 launchd) arm the
 * periodic logging timer. Initialization is retried on each call until it
 * succeeds; once enabled, the cached flag short-circuits further attempts.
 */
static bool
systemstats_is_enabled(void)
{
	static bool systemstats_enabled;

	if (!systemstats_enabled) {
		char *store = launchd_copy_persistent_store(LAUNCHD_PERSISTENT_STORE_LOGS, NULL);
		systemstats_enabled = systemstats_init(SYSTEMSTATS_WRITER_launchd, store);
		free(store);

		uint64_t interval;
		interval = systemstats_get_log_interval(SYSTEMSTATS_WRITER_launchd);

		// Only root launchd runs the recurring stats timer.
		if (pid1_magic && systemstats_enabled && interval) {
			jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)systemstats_timer_callback, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, interval, root_jobmgr));
		}
	}

	return systemstats_enabled;
}
3569
#endif // HAVE_SYSTEMSTATS
3570
3571
void
3572
job_reap(job_t j)
3573
{
3574
bool is_system_bootstrapper = ((j->is_bootstrapper && pid1_magic) && !j->mgr->parentmgr);
3575
3576
job_log(j, LOG_DEBUG, "Reaping");
3577
3578
if (unlikely(j->weird_bootstrap)) {
3579
int64_t junk = 0;
3580
job_mig_swap_integer(j, VPROC_GSK_WEIRD_BOOTSTRAP, 0, 0, &junk);
3581
}
3582
3583
if (j->fork_fd) {
3584
(void)job_assumes_zero_p(j, runtime_close(j->fork_fd));
3585
j->fork_fd = 0;
3586
}
3587
3588
bool was_dirty = false;
3589
if (!(j->anonymous || j->implicit_reap)) {
3590
uint32_t flags = 0;
3591
(void)job_assumes_zero(j, proc_get_dirty(j->p, &flags));
3592
3593
j->idle_exit = (flags & PROC_DIRTY_ALLOWS_IDLE_EXIT);
3594
was_dirty = (flags & PROC_DIRTY_IS_DIRTY);
3595
3596
job_log(j, LOG_DEBUG, "%sob exited %s.", j->idle_exit ? "Idle-exit j" : "J", was_dirty ? "while dirty" : "cleanly");
3597
}
3598
3599
if (j->idle_exit && was_dirty) {
3600
if (j->jettisoned) {
3601
job_log(j, LOG_NOTICE, "Idle-exit job was jettisoned while dirty. Will respawn immediately.");
3602
j->unthrottle = true;
3603
j->start_pending = true;
3604
} else {
3605
job_log(j, LOG_INFO, "Idle-exit job exited while dirty.");
3606
}
3607
} else if (j->idle_exit && j->jettisoned) {
3608
/* If an idle-exit job is jettisoned, then we shouldn't throttle its
3609
* next respawn because it could not help when it exited. If it ran for
3610
* the minimum runtime, then this doesn't really matter. If it ran for
3611
* less than the minimum runtime, it will not be throttled.
3612
*
3613
* <rdar://problem/12098667>
3614
*/
3615
job_log(j, LOG_NOTICE, "Idle-exit job was jettisoned. Will bypass throttle interval for next on-demand launch.");
3616
j->unthrottle = true;
3617
}
3618
3619
if (j->anonymous) {
3620
j->last_exit_status = 0;
3621
} else {
3622
uint64_t rt = runtime_get_nanoseconds_since(j->start_time);
3623
j->trt += rt;
3624
3625
job_log(j, LOG_PERF, "Last instance wall time: %06f", (double)rt / (double)NSEC_PER_SEC);
3626
j->nruns++;
3627
3628
/* The job is dead. While the PID/PGID is still known to be valid, try
3629
* to kill abandoned descendant processes.
3630
*/
3631
job_log_stray_pg(j);
3632
if (!j->abandon_pg) {
3633
if (unlikely(killpg2(j->p, SIGTERM) == -1 && errno != ESRCH)) {
3634
job_log(j, LOG_APPLEONLY, "Bug: 5487498");
3635
}
3636
}
3637
3638
int r = -1;
3639
if (!j->implicit_reap) {
3640
/* If the shutdown monitor has suspended a task and not resumed it
3641
* resumed it before exiting, the kernel will not clean up after the
3642
* shutdown monitor. It will, instead, leave the task suspended and
3643
* not process any pending signals on the event loop for the task.
3644
*
3645
* There are a variety of other kernel bugs that could prevent a
3646
* process from exiting, usually having to do with faulty hardware
3647
* or talking to misbehaving drivers that mark a thread as
3648
* uninterruptible and deadlock/hang before unmarking it as such. So
3649
* we have to work around that too.
3650
*
3651
* See <rdar://problem/9284889&9359725>.
3652
*/
3653
if (j->workaround9359725) {
3654
job_log(j, LOG_NOTICE, "Simulated exit: <rdar://problem/9359725>");
3655
j->last_exit_status = W_EXITCODE(-1, SIGSEGV);
3656
} else {
3657
#if HAVE_SYSTEMSTATS
3658
int r2;
3659
struct rusage_info_v1 ri;
3660
r2 = job_assumes_zero(j, proc_pid_rusage(j->p, RUSAGE_INFO_V1, (rusage_info_t)&ri));
3661
#endif
3662
if ((r = wait4(j->p, &j->last_exit_status, 0, NULL)) == -1) {
3663
job_log(j, LOG_ERR, "Reap failed. Assuming job exited: %d: %s", errno, strerror(errno));
3664
j->last_exit_status = W_EXITCODE(-1, SIGSEGV);
3665
}
3666
3667
if (j->idle_exit && j->jettisoned) {
3668
// Treat idle-exit jettisons as successful exit.
3669
//
3670
// <rdar://problem/13338973>
3671
(void)job_assumes_zero(j, WTERMSIG(j->last_exit_status));
3672
j->last_exit_status = W_EXITCODE(0, 0);
3673
}
3674
#if HAVE_SYSTEMSTATS
3675
if (r2 == 0) {
3676
job_log_perf_statistics(j, &ri, j->last_exit_status);
3677
}
3678
#endif
3679
}
3680
} else {
3681
job_log(j, LOG_INFO, "Job was implicitly reaped by the kernel.");
3682
}
3683
}
3684
3685
if (j->exit_timeout) {
3686
(void)kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_DELETE, 0, 0, NULL);
3687
}
3688
3689
LIST_REMOVE(j, pid_hash_sle);
3690
if (!j->anonymous) {
3691
LIST_REMOVE(j, global_pid_hash_sle);
3692
}
3693
3694
if (j->sent_signal_time) {
3695
uint64_t td_sec, td_usec, td = runtime_get_nanoseconds_since(j->sent_signal_time);
3696
3697
td_sec = td / NSEC_PER_SEC;
3698
td_usec = (td % NSEC_PER_SEC) / NSEC_PER_USEC;
3699
3700
job_log(j, LOG_DEBUG, "Exited %llu.%06llu seconds after the first signal was sent", td_sec, td_usec);
3701
}
3702
3703
int exit_status = WEXITSTATUS(j->last_exit_status);
3704
if (WIFEXITED(j->last_exit_status) && exit_status != 0) {
3705
if (!j->did_exec && _launchd_support_system) {
3706
xpc_object_t event = NULL;
3707
switch (exit_status) {
3708
case ENOENT:
3709
case ENOTDIR:
3710
case ESRCH:
3711
job_log(j, LOG_NOTICE, "Job failed to exec(3). Setting up event to tell us when to try again: %d: %s", exit_status, strerror(exit_status));
3712
event = xpc_dictionary_create(NULL, NULL, 0);
3713
xpc_dictionary_set_string(event, "Executable", j->prog ? j->prog : j->argv[0]);
3714
if (j->mach_uid) {
3715
xpc_dictionary_set_uint64(event, "UID", j->mach_uid);
3716
} else if (j->username) {
3717
xpc_dictionary_set_string(event, "UserName", j->username);
3718
}
3719
3720
if (j->groupname) {
3721
xpc_dictionary_set_string(event, "GroupName", j->groupname);
3722
}
3723
3724
(void)externalevent_new(j, _launchd_support_system, j->label, event, 0);
3725
xpc_release(event);
3726
3727
j->waiting4ok = true;
3728
default:
3729
job_log(j, LOG_NOTICE, "Job failed to exec(3) for weird reason: %d", exit_status);
3730
}
3731
} else {
3732
int level = LOG_INFO;
3733
if (exit_status != 0) {
3734
level = LOG_ERR;
3735
}
3736
3737
job_log(j, level, "Exited with code: %d", exit_status);
3738
}
3739
}
3740
3741
if (WIFSIGNALED(j->last_exit_status)) {
3742
int s = WTERMSIG(j->last_exit_status);
3743
if ((SIGKILL == s || SIGTERM == s) && !j->stopped) {
3744
job_log(j, LOG_NOTICE, "Exited: %s", strsignal(s));
3745
} else if (!(j->stopped || j->clean_kill || j->jettisoned)) {
3746
switch (s) {
3747
// Signals which indicate a crash.
3748
case SIGILL:
3749
case SIGABRT:
3750
case SIGFPE:
3751
case SIGBUS:
3752
case SIGSEGV:
3753
case SIGSYS:
3754
/* If the kernel has posted NOTE_EXIT and the signal sent to the process was
3755
* SIGTRAP, assume that it's a crash.
3756
*/
3757
case SIGTRAP:
3758
j->crashed = true;
3759
job_log(j, LOG_WARNING, "Job appears to have crashed: %s", strsignal(s));
3760
break;
3761
default:
3762
job_log(j, LOG_WARNING, "Exited abnormally: %s", strsignal(s));
3763
break;
3764
}
3765
3766
if (is_system_bootstrapper && j->crashed) {
3767
job_log(j, LOG_ERR | LOG_CONSOLE, "The %s bootstrapper has crashed: %s", j->mgr->name, strsignal(s));
3768
}
3769
}
3770
}
3771
3772
j->reaped = true;
3773
3774
struct machservice *msi = NULL;
3775
if (j->crashed || !(j->did_exec || j->anonymous)) {
3776
SLIST_FOREACH(msi, &j->machservices, sle) {
3777
if (j->crashed && !msi->isActive && (msi->drain_one_on_crash || msi->drain_all_on_crash)) {
3778
machservice_drain_port(msi);
3779
}
3780
3781
if (!j->did_exec && msi->reset && job_assumes(j, !msi->isActive)) {
3782
machservice_resetport(j, msi);
3783
}
3784
}
3785
}
3786
3787
/* HACK: Essentially duplicating the logic directly above. But this has
3788
* gotten really hairy, and I don't want to try consolidating it right now.
3789
*/
3790
if (j->xpc_service && !j->xpcproxy_did_exec) {
3791
job_log(j, LOG_ERR, "XPC Service could not exec(3). Resetting port.");
3792
SLIST_FOREACH(msi, &j->machservices, sle) {
3793
/* Drain the messages but do not reset the port. If xpcproxy could
3794
* not exec(3), then we don't want to continue trying, since there
3795
* is very likely a serious configuration error with the service.
3796
*
3797
* The above comment is weird. I originally said we should drain
3798
* messages but not reset the port, but that's exactly what we do
3799
* below, and I'm not sure which is the mistake, the comment or the
3800
* actual behavior.
3801
*
3802
* Since it's always been this way, I'll assume that the comment is
3803
* incorrect, but I'll leave it in place just to remind myself to
3804
* actually look into it at some point.
3805
*
3806
* <rdar://problem/8986802>
3807
*/
3808
if (msi->upfront && job_assumes(j, !msi->isActive)) {
3809
machservice_resetport(j, msi);
3810
}
3811
}
3812
}
3813
3814
struct suspended_peruser *spi = NULL;
3815
while ((spi = LIST_FIRST(&j->suspended_perusers))) {
3816
job_log(j, LOG_ERR, "Job exited before resuming per-user launchd for UID %u. Will forcibly resume.", spi->j->mach_uid);
3817
spi->j->peruser_suspend_count--;
3818
if (spi->j->peruser_suspend_count == 0) {
3819
job_dispatch(spi->j, false);
3820
}
3821
LIST_REMOVE(spi, sle);
3822
free(spi);
3823
}
3824
3825
if (j->exit_status_dest) {
3826
errno = helper_downcall_wait(j->exit_status_dest, j->last_exit_status);
3827
if (errno && errno != MACH_SEND_INVALID_DEST) {
3828
(void)job_assumes_zero(j, errno);
3829
}
3830
3831
j->exit_status_dest = MACH_PORT_NULL;
3832
}
3833
3834
if (j->spawn_reply_port) {
3835
/* If the child never called exec(3), we must send a spawn() reply so
3836
* that the requestor can get exit status from it. If we fail to send
3837
* the reply for some reason, we have to deallocate the exit status port
3838
* ourselves.
3839
*/
3840
kern_return_t kr = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
3841
if (kr) {
3842
if (kr != MACH_SEND_INVALID_DEST) {
3843
(void)job_assumes_zero(j, kr);
3844
}
3845
3846
(void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
3847
}
3848
3849
j->exit_status_port = MACH_PORT_NULL;
3850
j->spawn_reply_port = MACH_PORT_NULL;
3851
}
3852
3853
if (j->anonymous) {
3854
total_anon_children--;
3855
if (j->holds_ref) {
3856
job_log(j, LOG_PERF, "Anonymous job exited holding reference.");
3857
runtime_del_ref();
3858
}
3859
} else {
3860
job_log(j, LOG_PERF, "Job exited.");
3861
runtime_del_ref();
3862
total_children--;
3863
}
3864
3865
if (j->has_console) {
3866
launchd_wsp = 0;
3867
}
3868
3869
if (j->shutdown_monitor) {
3870
job_log(j, LOG_NOTICE | LOG_CONSOLE, "Shutdown monitor has exited.");
3871
_launchd_shutdown_monitor = NULL;
3872
j->shutdown_monitor = false;
3873
}
3874
3875
if (!j->anonymous) {
3876
j->mgr->normal_active_cnt--;
3877
}
3878
j->sent_signal_time = 0;
3879
j->sent_sigkill = false;
3880
j->clean_kill = false;
3881
j->event_monitor_ready2signal = false;
3882
j->p = 0;
3883
j->uniqueid = 0;
3884
}
3885
3886
void
3887
jobmgr_dispatch_all(jobmgr_t jm, bool newmounthack)
3888
{
3889
jobmgr_t jmi, jmn;
3890
job_t ji, jn;
3891
3892
if (jm->shutting_down) {
3893
return;
3894
}
3895
3896
SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
3897
jobmgr_dispatch_all(jmi, newmounthack);
3898
}
3899
3900
LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
3901
if (newmounthack && ji->start_on_mount) {
3902
ji->start_pending = true;
3903
}
3904
3905
job_dispatch(ji, false);
3906
}
3907
}
3908
3909
/* Job `j` changed state: dispatch every job that keeps an
 * OTHER_JOB_ENABLED/DISABLED semaphore naming j's label.
 */
void
job_dispatch_curious_jobs(job_t j)
{
	job_t ji = NULL, jt = NULL;
	// SAFE variant: job_dispatch() below may remove ji from this list.
	SLIST_FOREACH_SAFE(ji, &s_curious_jobs, curious_jobs_sle, jt) {
		struct semaphoreitem *si = NULL;
		SLIST_FOREACH(si, &ji->semaphores, sle) {
			if (!(si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED)) {
				continue;
			}

			if (strcmp(si->what, j->label) == 0) {
				job_log(ji, LOG_DEBUG, "Dispatching out of interest in \"%s\".", j->label);

				if (!ji->removing) {
					job_dispatch(ji, false);
				} else {
					job_log(ji, LOG_NOTICE, "The following job is circularly dependent upon this one: %s", j->label);
				}

				/* ji could be removed here, so don't do anything with it or its semaphores
				 * after this point.
				 */
				break;
			}
		}
	}
}
3937
3938
/* Decide what to do with job `j` right now: start it (if `kickstart` or its
 * keepalive criteria say so), watch it, or remove it if it is useless.
 *
 * Returns j, or NULL when the job was not dispatchable (awaiting audit
 * session, alias, cannot exec, suspended per-user launchd, permission
 * failure) — and NOTE: also NULL after the job was removed, so callers must
 * not touch j after a NULL return.
 */
job_t
job_dispatch(job_t j, bool kickstart)
{
	// Don't dispatch a job if it has no audit session set.
	if (!uuid_is_null(j->expected_audit_uuid)) {
		job_log(j, LOG_DEBUG, "Job is still awaiting its audit session UUID. Not dispatching.");
		return NULL;
	}
	if (j->alias) {
		job_log(j, LOG_DEBUG, "Job is an alias. Not dispatching.");
		return NULL;
	}

	if (j->waiting4ok) {
		job_log(j, LOG_DEBUG, "Job cannot exec(3). Not dispatching.");
		return NULL;
	}

#if TARGET_OS_EMBEDDED
	// Under "hand of god", only jobs belonging to the anointed user may run.
	if (launchd_embedded_handofgod && _launchd_embedded_god) {
		if (!job_assumes(j, _launchd_embedded_god->username != NULL && j->username != NULL)) {
			errno = EPERM;
			return NULL;
		}

		if (strcmp(j->username, _launchd_embedded_god->username) != 0) {
			errno = EPERM;
			return NULL;
		}
	} else if (launchd_embedded_handofgod) {
		errno = EINVAL;
		return NULL;
	}
#endif

	/*
	 * The whole job removal logic needs to be consolidated. The fact that
	 * a job can be removed from just about anywhere makes it easy to have
	 * stale pointers left behind somewhere on the stack that might get
	 * used after the deallocation. In particular, during job iteration.
	 *
	 * This is a classic example. The act of dispatching a job may delete it.
	 */
	if (!job_active(j)) {
		if (job_useless(j)) {
			job_log(j, LOG_DEBUG, "Job is useless. Removing.");
			job_remove(j);
			return NULL;
		}
		if (unlikely(j->per_user && j->peruser_suspend_count > 0)) {
			job_log(j, LOG_DEBUG, "Per-user launchd is suspended. Not dispatching.");
			return NULL;
		}

		if (kickstart || job_keepalive(j)) {
			job_log(j, LOG_DEBUG, "%starting job", kickstart ? "Kicks" : "S");
			job_start(j);
		} else {
			job_log(j, LOG_DEBUG, "Watching job.");
			job_watch(j);
		}
	} else {
		job_log(j, LOG_DEBUG, "Tried to dispatch an already active job: %s.", job_active(j));
	}

	return j;
}
4005
4006
void
4007
job_kill(job_t j)
4008
{
4009
if (unlikely(!j->p || j->anonymous)) {
4010
return;
4011
}
4012
4013
(void)job_assumes_zero_p(j, kill2(j->p, SIGKILL));
4014
4015
j->sent_sigkill = true;
4016
(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->exit_timeout, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, LAUNCHD_SIGKILL_TIMER, j));
4017
4018
job_log(j, LOG_DEBUG, "Sent SIGKILL signal");
4019
}
4020
4021
void
4022
job_open_shutdown_transaction(job_t j)
4023
{
4024
int rv = proc_set_dirty(j->p, true);
4025
if (rv != 0) {
4026
job_log(j, LOG_DEBUG, "Job wants to be dirty at shutdown, but it is not Instant Off-compliant. Treating normally.");
4027
j->dirty_at_shutdown = false;
4028
}
4029
}
4030
4031
void
4032
job_close_shutdown_transaction(job_t j)
4033
{
4034
if (j->dirty_at_shutdown) {
4035
job_log(j, LOG_DEBUG, "Closing shutdown transaction for job.");
4036
(void)job_assumes_zero(j, proc_set_dirty(j->p, false));
4037
j->dirty_at_shutdown = false;
4038
}
4039
}
4040
4041
/* Diagnostic (Apple-internal builds only): log children of this job that were
 * fork(2)ed but never exec(3)ed — discouraged in launchd-managed daemons.
 * Skipped for anonymous and per-user jobs.
 */
void
job_log_children_without_exec(job_t j)
{
	pid_t *pids = NULL;
	// Buffer sized for the kernel's max process count.
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	int i = 0, kp_cnt = 0;

	if (!launchd_apple_internal || j->anonymous || j->per_user) {
		return;
	}

	if (!job_assumes(j, (pids = malloc(len)) != NULL)) {
		return;
	}
	if (job_assumes_zero_p(j, (kp_cnt = proc_listchildpids(j->p, pids, len))) == -1) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		struct proc_bsdshortinfo proc;
		// 0 return means proc_pidinfo() failed; ESRCH = child already gone.
		if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			continue;
		}
		// P_EXEC means the child did exec(3); those are fine.
		if (proc.pbsi_flags & P_EXEC) {
			continue;
		}

		job_log(j, LOG_DEBUG, "Called *fork(). Please switch to posix_spawn*(), pthreads or launchd. Child PID %u", pids[i]);
	}

out:
	free(pids);
}
4077
4078
void
job_callback_proc(job_t j, struct kevent *kev)
{
	// Handle an EVFILT_PROC kevent for a job: exec, fork, and exit
	// notifications may all be coalesced into one event (fflags is a mask).
	bool program_changed = false;
	int fflags = kev->fflags;

	job_log(j, LOG_DEBUG, "EVFILT_PROC event for job.");
	log_kevent_struct(LOG_DEBUG, kev, 0);

	if (fflags & NOTE_EXEC) {
		program_changed = true;

		if (j->anonymous) {
			// An adopted (anonymous) process exec()ed a new image: refresh
			// the job's label from the new process name and re-hash it.
			struct proc_bsdshortinfo proc;
			if (proc_pidinfo(j->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) > 0) {
				char newlabel[1000];

				snprintf(newlabel, sizeof(newlabel), "%p.anonymous.%s", j, proc.pbsi_comm);

				job_log(j, LOG_INFO, "Program changed. Updating the label to: %s", newlabel);

				LIST_REMOVE(j, label_hash_sle);
				// NOTE(review): unbounded strcpy into j->label — presumably
				// the anonymous-job label buffer is allocated large enough
				// for this format; TODO confirm at the allocation site.
				strcpy((char *)j->label, newlabel);

				// XPC-domain jobs stay hashed in their own manager; all
				// others are re-hashed into the root job manager.
				jobmgr_t where2put = root_jobmgr;
				if (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
					where2put = j->mgr;
				}
				LIST_INSERT_HEAD(&where2put->label_hash[hash_label(j->label)], j, label_hash_sle);
			} else if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
		} else {
			// A managed job exec()ed. If a spawn reply is pending, deliver
			// it now that the child is running its real image.
			if (j->spawn_reply_port) {
				errno = job_mig_spawn2_reply(j->spawn_reply_port, BOOTSTRAP_SUCCESS, j->p, j->exit_status_port);
				if (errno) {
					if (errno != MACH_SEND_INVALID_DEST) {
						(void)job_assumes_zero(j, errno);
					}
					(void)job_assumes_zero(j, launchd_mport_close_recv(j->exit_status_port));
				}

				j->spawn_reply_port = MACH_PORT_NULL;
				j->exit_status_port = MACH_PORT_NULL;
			}

			// For XPC services the first exec is xpcproxy itself; a second
			// exec means the proxy handed off to the real service binary.
			if (j->xpc_service && j->did_exec) {
				j->xpcproxy_did_exec = true;
			}

			j->did_exec = true;
			job_log(j, LOG_DEBUG, "Program changed");
		}
	}

	if (fflags & NOTE_FORK) {
		job_log(j, LOG_DEBUG, "fork()ed%s", program_changed ? ". For this message only: We don't know whether this event happened before or after execve()." : "");
		job_log_children_without_exec(j);
	}

	if (fflags & NOTE_EXIT) {
		// kev->data carries exit-detail flags when NOTE_EXIT_DETAIL was requested.
		if (kev->data & NOTE_EXIT_DECRYPTFAIL) {
			j->fpfail = true;
			job_log(j, LOG_WARNING, "FairPlay decryption failed on binary for job.");
		} else if (kev->data & NOTE_EXIT_MEMORY) {
			j->jettisoned = true;
			job_log(j, LOG_INFO, "Job was killed due to memory pressure.");
		}

		job_reap(j);

		if (j->anonymous) {
			// Anonymous jobs exist only while their process does.
			job_remove(j);
			j = NULL;
		} else {
			// Drop any attach waiters that were targeting the dead PID,
			// then re-dispatch the job (it may need to respawn or watch).
			struct waiting4attach *w4ai = NULL;
			struct waiting4attach *w4ait = NULL;
			LIST_FOREACH_SAFE(w4ai, &_launchd_domain_waiters, le, w4ait) {
				if (w4ai->dest == (pid_t)kev->ident) {
					waiting4attach_delete(j->mgr, w4ai);
				}
			}

			(void)job_dispatch(j, false);
		}
	}
}
4165
4166
void
job_callback_timer(job_t j, void *ident)
{
	// Dispatch an EVFILT_TIMER event for a job. The timer's identity is
	// encoded by which address was used as the kevent ident: the job itself
	// (respawn throttle), its semaphore list, its start interval, or its
	// exit timeout.
	if (j == ident) {
		job_log(j, LOG_DEBUG, "j == ident (%p)", ident);
		job_dispatch(j, true);
	} else if (&j->semaphores == ident) {
		job_log(j, LOG_DEBUG, "&j->semaphores == ident (%p)", ident);
		job_dispatch(j, false);
	} else if (&j->start_interval == ident) {
		job_log(j, LOG_DEBUG, "&j->start_interval == ident (%p)", ident);
		j->start_pending = true;
		job_dispatch(j, false);
	} else if (&j->exit_timeout == ident) {
		if (!job_assumes(j, j->p != 0)) {
			return;
		}

		if (j->sent_sigkill) {
			// SIGKILL was already sent and the process still hasn't died:
			// compute how long we've been waiting and simulate the exit.
			uint64_t td = runtime_get_nanoseconds_since(j->sent_signal_time);

			td /= NSEC_PER_SEC;
			td -= j->clean_kill ? 0 : j->exit_timeout;

			job_log(j, LOG_WARNING | LOG_CONSOLE, "Job has not died after being %skilled %llu seconds ago. Simulating exit.", j->clean_kill ? "cleanly " : "", td);
			j->workaround9359725 = true;

			// This basically has to be done off the main thread. We have no
			// mechanism for draining the main queue in our run loop (like CF
			// does), and the kevent mechanism wants an object to be associated
			// as the callback. So we just create a dispatch source and reap the
			// errant PID whenever we can. Note that it is not safe for us to do
			// any logging in this block, since logging requires exclusive
			// access to global data structures that is only protected by the
			// main thread.
			dispatch_source_t hack_13570156 = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, j->p, DISPATCH_PROC_EXIT, dispatch_get_global_queue(0, 0));
			dispatch_source_set_event_handler(hack_13570156, ^{
				pid_t pid = (pid_t)dispatch_source_get_handle(hack_13570156);

				int status = 0;
				(void)waitpid(pid, &status, 0);
				dispatch_release(hack_13570156);
			});

			dispatch_resume(hack_13570156);

			if (launchd_trap_sigkill_bugs) {
				job_log(j, LOG_NOTICE | LOG_CONSOLE, "Trapping into kernel debugger. You can continue the machine after it has been debugged, and shutdown will proceed normally.");
				(void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
			}

			// Fabricate a NOTE_EXIT event so the normal reaping path runs
			// even though the kernel never delivered one.
			struct kevent bogus_exit;
			EV_SET(&bogus_exit, j->p, EVFILT_PROC, 0, NOTE_EXIT, 0, 0);
			jobmgr_callback(j->mgr, &bogus_exit);
		} else {
			// Exit timeout expired after SIGTERM: escalate to SIGKILL
			// (optionally trapping into the debugger first).
			if (unlikely(j->debug_before_kill)) {
				job_log(j, LOG_NOTICE, "Exit timeout elapsed. Entering the kernel debugger");
				(void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
			}

			job_log(j, LOG_WARNING | LOG_CONSOLE, "Exit timeout elapsed (%u seconds). Killing", j->exit_timeout);
			job_kill(j);
		}
	} else {
		job_log(j, LOG_ERR, "Unrecognized job timer callback: %p", ident);
	}
}
4233
4234
void
4235
job_callback_read(job_t j, int ident)
4236
{
4237
if (ident == j->stdin_fd) {
4238
job_dispatch(j, true);
4239
} else {
4240
socketgroup_callback(j);
4241
}
4242
}
4243
4244
void
4245
jobmgr_reap_bulk(jobmgr_t jm, struct kevent *kev)
4246
{
4247
jobmgr_t jmi;
4248
job_t j;
4249
4250
SLIST_FOREACH(jmi, &jm->submgrs, sle) {
4251
jobmgr_reap_bulk(jmi, kev);
4252
}
4253
4254
if ((j = jobmgr_find_by_pid(jm, (pid_t)kev->ident, false))) {
4255
kev->udata = j;
4256
job_callback(j, kev);
4257
}
4258
}
4259
4260
void
jobmgr_callback(void *obj, struct kevent *kev)
{
	// Central kevent dispatcher for a job manager: process reaping,
	// control signals, filesystem events, timers, and vnode watches.
	jobmgr_t jm = obj;

#if TARGET_OS_EMBEDDED
	int flag2check = VQ_MOUNT;
#else
	int flag2check = VQ_UPDATE;
#endif

	switch (kev->filter) {
	case EVFILT_PROC:
		// A child died (or forked/exec'd): walk the manager tree, then GC.
		jobmgr_reap_bulk(jm, kev);
		root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
		break;
	case EVFILT_SIGNAL:
		switch (kev->ident) {
		case SIGTERM:
			jobmgr_log(jm, LOG_DEBUG, "Got SIGTERM. Shutting down.");
			return launchd_shutdown();
		case SIGUSR1:
			return calendarinterval_callback();
		case SIGUSR2:
			// Turn on all logging.
			launchd_log_perf = true;
			launchd_log_debug = true;
			launchd_log_shutdown = true;
			/* Hopefully /var is available by this point. If not, uh, oh well.
			 * It's just a debugging facility.
			 */
			return jobmgr_log_perf_statistics(jm, false);
		case SIGINFO:
			return jobmgr_log_perf_statistics(jm, true);
		default:
			jobmgr_log(jm, LOG_ERR, "Unrecognized signal: %lu: %s", kev->ident, strsignal(kev->ident));
		}
		break;
	case EVFILT_FS:
		// Filesystem state changed: check whether /var/log became writable
		// (so logging can start) and re-dispatch jobs gated on mounts.
		if (kev->fflags & flag2check) {
			if (!launchd_var_available) {
				struct stat sb;
				if (stat("/var/log", &sb) == 0 && (sb.st_mode & S_IWUSR)) {
					launchd_var_available = true;
				}
			}
		} else if (kev->fflags & VQ_MOUNT) {
			jobmgr_dispatch_all(jm, true);
		}
		jobmgr_dispatch_all_semaphores(jm);
		break;
	case EVFILT_TIMER:
		// Timer identity is encoded by the ident address (same trick as
		// job_callback_timer).
		if (kev->ident == (uintptr_t)&sorted_calendar_events) {
			calendarinterval_callback();
		} else if (kev->ident == (uintptr_t)jm) {
			jobmgr_log(jm, LOG_DEBUG, "Shutdown timer firing.");
			jobmgr_still_alive_with_check(jm);
		} else if (kev->ident == (uintptr_t)&jm->reboot_flags) {
			jobmgr_do_garbage_collection(jm);
		} else if (kev->ident == (uintptr_t)&launchd_runtime_busy_time) {
			jobmgr_log(jm, LOG_DEBUG, "Idle exit timer fired. Shutting down.");
			if (jobmgr_assumes_zero(jm, runtime_busy_cnt) == 0) {
				return launchd_shutdown();
			}
#if HAVE_SYSTEMSTATS
		} else if (kev->ident == (uintptr_t)systemstats_timer_callback) {
			systemstats_timer_callback();
#endif
		}
		break;
	case EVFILT_VNODE:
		if (kev->ident == (uintptr_t)s_no_hang_fd) {
			// Watch for /dev/autofs_nowait appearing; swap our fd over to it.
			int _no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
			if (unlikely(_no_hang_fd != -1)) {
				jobmgr_log(root_jobmgr, LOG_DEBUG, "/dev/autofs_nowait has appeared!");
				(void)jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL));
				(void)jobmgr_assumes_zero_p(root_jobmgr, runtime_close(s_no_hang_fd));
				s_no_hang_fd = _fd(_no_hang_fd);
			}
		} else if (pid1_magic && launchd_console && kev->ident == (uintptr_t)fileno(launchd_console)) {
			// The console device went away (e.g. revoked); reopen it.
			int cfd = -1;
			if (jobmgr_assumes_zero_p(jm, cfd = open(_PATH_CONSOLE, O_WRONLY | O_NOCTTY)) != -1) {
				_fd(cfd);
				if (!(launchd_console = fdopen(cfd, "w"))) {
					(void)jobmgr_assumes_zero(jm, errno);
					(void)close(cfd);
				}
			}
		}
		break;
	default:
		jobmgr_log(jm, LOG_ERR, "Unrecognized kevent filter: %hd", kev->filter);
	}
}
4354
4355
void
4356
job_callback(void *obj, struct kevent *kev)
4357
{
4358
job_t j = obj;
4359
4360
job_log(j, LOG_DEBUG, "Dispatching kevent callback.");
4361
4362
switch (kev->filter) {
4363
case EVFILT_PROC:
4364
return job_callback_proc(j, kev);
4365
case EVFILT_TIMER:
4366
return job_callback_timer(j, (void *) kev->ident);
4367
case EVFILT_READ:
4368
return job_callback_read(j, (int) kev->ident);
4369
case EVFILT_MACHPORT:
4370
return (void)job_dispatch(j, true);
4371
default:
4372
job_log(j, LOG_ERR, "Unrecognized job callback filter: %hd", kev->filter);
4373
}
4374
}
4375
4376
void
job_start(job_t j)
{
	// Spawn a job's process: throttle rapid respawns, set up the IPC and
	// exec-synchronization socketpairs, fork, and (in the parent) register
	// the child with the kevent system and reset per-run state.
	uint64_t td;
	int spair[2];      // launchd IPC socketpair (only when sipc)
	int execspair[2];  // used to stall the child until the parent attaches a kevent
	char nbuf[64];
	pid_t c;
	bool sipc = false;
	u_int proc_fflags = NOTE_EXIT|NOTE_FORK|NOTE_EXEC|NOTE_EXIT_DETAIL|NOTE_EXITSTATUS;

	if (!job_assumes(j, j->mgr != NULL)) {
		return;
	}

	if (unlikely(job_active(j))) {
		job_log(j, LOG_DEBUG, "Already started");
		return;
	}

	if (!LIST_EMPTY(&j->mgr->attaches)) {
		job_log(j, LOG_DEBUG, "Looking for attachments for job: %s", j->label);
		(void)waiting4attach_find(j->mgr, j);
	}

	/*
	 * Some users adjust the wall-clock and then expect software to not notice.
	 * Therefore, launchd must use an absolute clock instead of the wall clock
	 * wherever possible.
	 */
	td = runtime_get_nanoseconds_since(j->start_time);
	td /= NSEC_PER_SEC;

	// Respawn throttling: if the last run was shorter than min_run_time,
	// delay the restart via a one-shot timer instead of starting now.
	if (j->start_time && (td < j->min_run_time) && !j->legacy_mach_job && !j->inetcompat && !j->unthrottle) {
		time_t respawn_delta = j->min_run_time - (uint32_t)td;
		/* We technically should ref-count throttled jobs to prevent idle exit,
		 * but we're not directly tracking the 'throttled' state at the moment.
		 */
		job_log(j, LOG_NOTICE, "Throttling respawn: Will start in %ld seconds", respawn_delta);
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, respawn_delta, j));
		job_ignore(j);
		return;
	}

	// Jobs with sockets or Mach services get an IPC channel to launchd.
	if (likely(!j->legacy_mach_job)) {
		sipc = ((!SLIST_EMPTY(&j->sockets) || !SLIST_EMPTY(&j->machservices)) && !j->deny_job_creation) || j->embedded_god;
	}

	if (sipc) {
		(void)job_assumes_zero_p(j, socketpair(AF_UNIX, SOCK_STREAM, 0, spair));
	}

	(void)job_assumes_zero_p(j, socketpair(AF_UNIX, SOCK_STREAM, 0, execspair));

	switch (c = runtime_fork(j->weird_bootstrap ? j->j_port : j->mgr->jm_port)) {
	case -1:
		// fork() failed: clean up both socketpairs and retry in a second.
		job_log_error(j, LOG_ERR, "fork() failed, will try again in one second");
		(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)j, EVFILT_TIMER, EV_ADD|EV_ONESHOT, NOTE_SECONDS, 1, j));
		job_ignore(j);

		(void)job_assumes_zero(j, runtime_close(execspair[0]));
		(void)job_assumes_zero(j, runtime_close(execspair[1]));
		if (sipc) {
			(void)job_assumes_zero(j, runtime_close(spair[0]));
			(void)job_assumes_zero(j, runtime_close(spair[1]));
		}
		break;
	case 0:
		// Child: stall until the parent has attached its kevent, export the
		// trusted fd if IPC is in use, then exec via job_start_child().
		if (unlikely(_vproc_post_fork_ping())) {
			_exit(EXIT_FAILURE);
		}

		(void)job_assumes_zero(j, runtime_close(execspair[0]));
		// wait for our parent to say they've attached a kevent to us
		read(_fd(execspair[1]), &c, sizeof(c));

		if (sipc) {
			(void)job_assumes_zero(j, runtime_close(spair[0]));
			snprintf(nbuf, sizeof(nbuf), "%d", spair[1]);
			setenv(LAUNCHD_TRUSTED_FD_ENV, nbuf, 1);
		}
		job_start_child(j);
		break;
	default:
		// Parent: record the start, reset per-run flags, and register the
		// child PID in the active-job hash tables and kevent system.
		j->start_time = runtime_get_opaque_time();

		job_log(j, LOG_DEBUG, "Started as PID: %u", c);

		j->did_exec = false;
		j->fpfail = false;
		j->jettisoned = false;
		j->xpcproxy_did_exec = false;
		j->checkedin = false;
		j->start_pending = false;
		j->reaped = false;
		j->crashed = false;
		j->stopped = false;
		j->workaround9359725 = false;
		j->implicit_reap = false;
		j->unthrottle = false;
		if (j->needs_kickoff) {
			j->needs_kickoff = false;

			if (SLIST_EMPTY(&j->semaphores)) {
				j->ondemand = false;
			}
		}

		if (j->has_console) {
			launchd_wsp = c;
		}

		job_log(j, LOG_PERF, "Job started.");
		runtime_add_ref();
		total_children++;
		LIST_INSERT_HEAD(&j->mgr->active_jobs[ACTIVE_JOB_HASH(c)], j, pid_hash_sle);
		LIST_INSERT_HEAD(&managed_actives[ACTIVE_JOB_HASH(c)], j, global_pid_hash_sle);
		j->p = c;

		struct proc_uniqidentifierinfo info;
		if (proc_pidinfo(c, PROC_PIDUNIQIDENTIFIERINFO, 0, &info, PROC_PIDUNIQIDENTIFIERINFO_SIZE) != 0) {
			// ignore errors here, kevent_mod below will catch them and clean up
			j->uniqueid = info.p_uniqueid;
		}

		j->mgr->normal_active_cnt++;
		j->fork_fd = _fd(execspair[0]);
		(void)job_assumes_zero(j, runtime_close(execspair[1]));
		if (sipc) {
			(void)job_assumes_zero(j, runtime_close(spair[1]));
			ipc_open(_fd(spair[0]), j);
		}
		if (kevent_mod(c, EVFILT_PROC, EV_ADD, proc_fflags, 0, root_jobmgr ? root_jobmgr : j->mgr) != -1) {
			job_ignore(j);
		} else {
			if (errno == ESRCH) {
				job_log(j, LOG_ERR, "Child was killed before we could attach a kevent.");
			} else {
				(void)job_assumes(j, errno == ESRCH);
			}
			job_reap(j);

			/* If we have reaped this job within this same run loop pass, then
			 * it will be currently ignored. So if there's a failure to attach a
			 * kevent, we need to make sure that we watch the job so that we can
			 * respawn it.
			 *
			 * See <rdar://problem/10140809>.
			 */
			job_watch(j);
		}

#if HAVE_SYSTEMSTATS
		if (systemstats_is_enabled()) {
			/* We don't really *need* to make the full rusage call -- it
			 * will be mostly 0s and very small numbers. We only need
			 * ri_proc_start_abstime, because that's how we disambiguiate
			 * PIDs when they wrap around; and the UUID.
			 * In the future we should use the 64-bit process unique ID,
			 * so there's nothing to disambiguiate, and skip the full
			 * rusage call here.
			 *
			 * Well, the future is now.
			 */
			if (_systemstats_get_property(SYSTEMSTATS_API_VERSION, SYSTEMSTATS_WRITER_launchd, SYSTEMSTATS_PROPERTY_LAUNCHD_SHOULD_LOG_JOB_START)) {
				job_log_perf_statistics(j, NULL, -3);
			}
		}
#endif
		j->wait4debugger_oneshot = false;
		// Unless the job asked to stall before exec, release the child now.
		if (likely(!j->stall_before_exec)) {
			job_uncork_fork(j);
		}
		break;
	}
}
4552
4553
void
job_start_child(job_t j)
{
	// Runs in the forked child: build argv, configure posix_spawn
	// attributes (process type, sandbox, jetsam, QoS, etc.), then exec the
	// job's program via POSIX_SPAWN_SETEXEC. Only returns via _exit().
	typeof(posix_spawn) *psf;
	const char *file2exec = "/usr/libexec/launchproxy";
	const char **argv;
	posix_spawnattr_t spattr;
	int gflags = GLOB_NOSORT|GLOB_NOCHECK|GLOB_TILDE|GLOB_DOOFFS;
	glob_t g;
	short spflags = POSIX_SPAWN_SETEXEC;
	int psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND;
	size_t binpref_out_cnt = 0;
	size_t i;

	(void)job_assumes_zero(j, posix_spawnattr_init(&spattr));

	job_setup_attributes(j);

	// If a debugger is waiting to attach and this isn't already an XPC
	// service, interpose xpcproxy so the attach can happen.
	bool use_xpcproxy = false;
	struct waiting4attach *w4a = waiting4attach_find(j->mgr, j);
	if (w4a) {
		(void)setenv(XPC_SERVICE_ENV_ATTACHED, "1", 1);
		if (!j->xpc_service) {
			use_xpcproxy = true;
		}
	}

	if (use_xpcproxy) {
		argv = alloca(3 * sizeof(char *));
		argv[0] = "/usr/libexec/xpcproxy";
		argv[1] = "-debug";
		argv[2] = NULL;

		file2exec = argv[0];
	} else if (unlikely(j->argv && j->globargv)) {
		// Glob-expand each argument; gl_offs reserves slot 0 for the
		// program path (launchproxy or the program itself).
		g.gl_offs = 1;
		for (i = 0; i < j->argc; i++) {
			if (i > 0) {
				gflags |= GLOB_APPEND;
			}
			if (glob(j->argv[i], gflags, NULL, &g) != 0) {
				job_log_error(j, LOG_ERR, "glob(\"%s\")", j->argv[i]);
				exit(EXIT_FAILURE);
			}
		}
		g.gl_pathv[0] = (char *)file2exec;
		argv = (const char **)g.gl_pathv;
	} else if (likely(j->argv)) {
		argv = alloca((j->argc + 2) * sizeof(char *));
		argv[0] = file2exec;
		for (i = 0; i < j->argc; i++) {
			argv[i + 1] = j->argv[i];
		}
		argv[i + 1] = NULL;
	} else {
		argv = alloca(3 * sizeof(char *));
		argv[0] = file2exec;
		argv[1] = j->prog;
		argv[2] = NULL;
	}

	// Skip the launchproxy slot unless inetd-compat mode or xpcproxy needs it.
	if (likely(!(j->inetcompat || use_xpcproxy))) {
		argv++;
	}

	if (unlikely(j->wait4debugger || j->wait4debugger_oneshot)) {
		if (!j->app) {
			job_log(j, LOG_WARNING, "Spawned and waiting for the debugger to attach before continuing...");
		}
		spflags |= POSIX_SPAWN_START_SUSPENDED;
	}

#if !TARGET_OS_EMBEDDED
	if (unlikely(j->disable_aslr)) {
		spflags |= _POSIX_SPAWN_DISABLE_ASLR;
	}
#endif
	spflags |= j->pstype;

	(void)job_assumes_zero(j, posix_spawnattr_setflags(&spattr, spflags));
	if (unlikely(j->j_binpref_cnt)) {
		(void)job_assumes_zero(j, posix_spawnattr_setbinpref_np(&spattr, j->j_binpref_cnt, j->j_binpref, &binpref_out_cnt));
		(void)job_assumes(j, binpref_out_cnt == j->j_binpref_cnt);
	}

	psproctype = j->psproctype;
	(void)job_assumes_zero(j, posix_spawnattr_setprocesstype_np(&spattr, psproctype));

#if TARGET_OS_EMBEDDED
	/* Set jetsam attributes. POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY guards
	 * against a race which arises if, during spawn, an initial jetsam property
	 * update occurs before the values below are applied. In this case, the flag
	 * ensures that the subsequent change is ignored; the explicit update should
	 * be given priority.
	 */
	(void)job_assumes_zero(j, posix_spawnattr_setjetsam(&spattr,
	        POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY | (j->jetsam_memory_limit_background ? POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND : 0),
	        j->jetsam_priority, j->jetsam_memlimit));
#endif

	// Pass down send rights to our listener ports for importance donation.
	mach_port_array_t sports = NULL;
	mach_msg_type_number_t sports_cnt = 0;
	kern_return_t kr = vproc_mig_get_listener_port_rights(bootstrap_port, &sports, &sports_cnt);
	if (kr == 0 && sports_cnt) {
		/* For some reason, this SPI takes a count as a signed quantity. */
		(void)posix_spawnattr_set_importancewatch_port_np(&spattr, (int)sports_cnt, sports);

		/* All "count" parameters in MIG are counts of the array. So an array of
		 * mach_port_t containing 10 elements will have a count of ten, but it
		 * will occupy 40 bytes. So we must do the multiplication here to pass
		 * the correct size.
		 *
		 * Note that we do NOT release the send rights. We need them to be valid
		 * at the time they are passed to posix_spawn(2). When we exec(3) using
		 * posix_spawn(2), they'll be cleaned up anyway.
		 */
		mig_deallocate((vm_address_t)sports, sports_cnt * sizeof(sports[0]));
	} else if (kr != BOOTSTRAP_UNKNOWN_SERVICE) {
		(void)job_assumes_zero(j, kr);
	}

#if TARGET_OS_EMBEDDED
	if (!j->app || j->system_app) {
		(void)job_assumes_zero(j, posix_spawnattr_setcpumonitor_default(&spattr));
	}
#else
	(void)job_assumes_zero(j, posix_spawnattr_setcpumonitor_default(&spattr));
#endif

#if !TARGET_OS_EMBEDDED
	struct task_qos_policy qosinfo = {
		.task_latency_qos_tier = LATENCY_QOS_LAUNCH_DEFAULT_TIER,
		.task_throughput_qos_tier = THROUGHPUT_QOS_LAUNCH_DEFAULT_TIER,
	};

	// Applied to our own task pre-exec; inherited across the exec below.
	if (!j->legacy_timers) {
		kr = task_policy_set(mach_task_self(), TASK_BASE_QOS_POLICY, (task_policy_t)&qosinfo, TASK_QOS_POLICY_COUNT);
		(void)job_assumes_zero_p(j, kr);
	}
#endif

#if HAVE_RESPONSIBILITY
	/* Specify which process is responsible for the new job. Per-app XPC
	 * services are the responsibility of the app. Other processes are
	 * responsible for themselves. This decision is final and also applies
	 * to the process's children, so don't initialize responsibility when
	 * starting a per-user launchd.
	 */
	if (j->mgr->req_pid) {
		responsibility_init2(j->mgr->req_pid, NULL);
	} else if (!j->per_user) {
		responsibility_init2(getpid(), j->prog ? j->prog : j->argv[0]);
	}
#endif

#if HAVE_QUARANTINE
	if (j->quarantine_data) {
		qtn_proc_t qp;

		if (job_assumes(j, qp = qtn_proc_alloc())) {
			if (job_assumes_zero(j, qtn_proc_init_with_data(qp, j->quarantine_data, j->quarantine_data_sz) == 0)) {
				(void)job_assumes_zero(j, qtn_proc_apply_to_self(qp));
			}
		}
	}
#endif

#if HAVE_SANDBOX
#if TARGET_OS_EMBEDDED
	struct sandbox_spawnattrs sbattrs;
	if (j->seatbelt_profile || j->container_identifier) {
		sandbox_spawnattrs_init(&sbattrs);
		if (j->seatbelt_profile) {
			sandbox_spawnattrs_setprofilename(&sbattrs, j->seatbelt_profile);
		}
		if (j->container_identifier) {
			sandbox_spawnattrs_setcontainer(&sbattrs, j->container_identifier);
		}
		(void)job_assumes_zero(j, posix_spawnattr_setmacpolicyinfo_np(&spattr, "Sandbox", &sbattrs, sizeof(sbattrs)));
	}
#else
	if (j->seatbelt_profile) {
		char *seatbelt_err_buf = NULL;

		if (job_assumes_zero_p(j, sandbox_init(j->seatbelt_profile, j->seatbelt_flags, &seatbelt_err_buf)) == -1) {
			if (seatbelt_err_buf) {
				job_log(j, LOG_ERR, "Sandbox failed to init: %s", seatbelt_err_buf);
			}
			goto out_bad;
		}
	}
#endif
#endif

	// posix_spawn() honors PATH lookup only when no explicit program path
	// was given; POSIX_SPAWN_SETEXEC makes this replace our image (no return
	// on success).
	psf = j->prog ? posix_spawn : posix_spawnp;

	if (likely(!(j->inetcompat || use_xpcproxy))) {
		file2exec = j->prog ? j->prog : argv[0];
	}

	errno = psf(NULL, file2exec, NULL, &spattr, (char *const *)argv, environ);

#if HAVE_SANDBOX && !TARGET_OS_EMBEDDED
out_bad:
#endif
	_exit(errno);
}
4760
4761
void
4762
jobmgr_export_env_from_other_jobs(jobmgr_t jm, launch_data_t dict)
4763
{
4764
launch_data_t tmp;
4765
struct envitem *ei;
4766
job_t ji;
4767
4768
if (jm->parentmgr) {
4769
jobmgr_export_env_from_other_jobs(jm->parentmgr, dict);
4770
} else {
4771
char **tmpenviron = environ;
4772
for (; *tmpenviron; tmpenviron++) {
4773
char envkey[1024];
4774
launch_data_t s = launch_data_alloc(LAUNCH_DATA_STRING);
4775
launch_data_set_string(s, strchr(*tmpenviron, '=') + 1);
4776
strncpy(envkey, *tmpenviron, sizeof(envkey));
4777
*(strchr(envkey, '=')) = '\0';
4778
launch_data_dict_insert(dict, s, envkey);
4779
}
4780
}
4781
4782
LIST_FOREACH(ji, &jm->jobs, sle) {
4783
SLIST_FOREACH(ei, &ji->global_env, sle) {
4784
if ((tmp = launch_data_new_string(ei->value))) {
4785
launch_data_dict_insert(dict, tmp, ei->key);
4786
}
4787
}
4788
}
4789
}
4790
4791
void
4792
jobmgr_setup_env_from_other_jobs(jobmgr_t jm)
4793
{
4794
struct envitem *ei;
4795
job_t ji;
4796
4797
if (jm->parentmgr) {
4798
jobmgr_setup_env_from_other_jobs(jm->parentmgr);
4799
}
4800
4801
LIST_FOREACH(ji, &jm->global_env_jobs, global_env_sle) {
4802
SLIST_FOREACH(ei, &ji->global_env, sle) {
4803
setenv(ei->key, ei->value, 1);
4804
}
4805
}
4806
}
4807
4808
void
job_log_pids_with_weird_uids(job_t j)
{
	// Diagnostic (Apple-internal builds only): when a getpwuid() lookup for
	// a job's mach_uid fails, scan all processes and log any running with
	// that orphaned UID.
	size_t len = sizeof(pid_t) * get_kern_max_proc();
	pid_t *pids = NULL;
	uid_t u = j->mach_uid;
	int i = 0, kp_cnt = 0;

	if (!launchd_apple_internal) {
		return;
	}

	pids = malloc(len);
	if (!job_assumes(j, pids != NULL)) {
		return;
	}

	runtime_ktrace(RTKT_LAUNCHD_FINDING_WEIRD_UIDS, j->p, u, 0);

	/* libproc actually has some serious performance drawbacks when used over sysctl(3) in
	 * scenarios like this. Whereas sysctl(3) can give us back all the kinfo_proc's in
	 * one kernel call, libproc requires that we get a list of PIDs we're interested in
	 * (in this case, all PIDs on the system) and then get a single proc_bsdshortinfo
	 * struct back in a single call for each one.
	 *
	 * This kind of thing is also more inherently racy than sysctl(3). While sysctl(3)
	 * returns a snapshot, it returns the whole shebang at once. Any PIDs given to us by
	 * libproc could go stale before we call proc_pidinfo().
	 *
	 * Note that proc_list*() APIs return the number of PIDs given back, not the number
	 * of bytes written to the buffer.
	 */
	if (job_assumes_zero_p(j, (kp_cnt = proc_listallpids(pids, len))) == -1) {
		goto out;
	}

	for (i = 0; i < kp_cnt; i++) {
		struct proc_bsdshortinfo proc;
		/* We perhaps should not log a bug here if we get ESRCH back, due to the race
		 * detailed above.
		 */
		if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			continue;
		}

		uid_t i_euid = proc.pbsi_uid;
		uid_t i_uid = proc.pbsi_ruid;
		uid_t i_svuid = proc.pbsi_svuid;
		pid_t i_pid = pids[i];

		// Only report processes running under the orphaned UID in any of
		// real/effective/saved roles.
		if (i_euid != u && i_uid != u && i_svuid != u) {
			continue;
		}

		job_log(j, LOG_ERR, "PID %u \"%s\" has no account to back it! Real/effective/saved UIDs: %u/%u/%u", i_pid, proc.pbsi_comm, i_uid, i_euid, i_svuid);

// Temporarily disabled due to 5423935 and 4946119.
#if 0
		// Ask the accountless process to exit.
		(void)job_assumes_zero_p(j, kill2(i_pid, SIGTERM));
#endif
	}

out:
	free(pids);
}
4877
4878
static struct passwd *
4879
job_getpwnam(job_t j, const char *name)
4880
{
4881
/*
4882
* methodology for system daemons
4883
*
4884
* first lookup user record without any opendirectoryd interaction,
4885
* we don't know what interprocess dependencies might be in flight.
4886
* if that fails, we re-enable opendirectoryd interaction and
4887
* re-issue the lookup. We have to disable the libinfo L1 cache
4888
* otherwise libinfo will return the negative cache entry on the retry
4889
*/
4890
#if !TARGET_OS_EMBEDDED
4891
struct passwd *pw = NULL;
4892
4893
if (pid1_magic && j->mgr == root_jobmgr) {
4894
// 1 == SEARCH_MODULE_FLAG_DISABLED
4895
si_search_module_set_flags("ds", 1);
4896
gL1CacheEnabled = false;
4897
4898
pw = getpwnam(name);
4899
si_search_module_set_flags("ds", 0);
4900
}
4901
4902
if (pw == NULL) {
4903
pw = getpwnam(name);
4904
}
4905
4906
return pw;
4907
#else
4908
#pragma unused (j)
4909
return getpwnam(name);
4910
#endif
4911
}
4912
4913
static struct group *
4914
job_getgrnam(job_t j, const char *name)
4915
{
4916
#if !TARGET_OS_EMBEDDED
4917
struct group *gr = NULL;
4918
4919
if (pid1_magic && j->mgr == root_jobmgr) {
4920
si_search_module_set_flags("ds", 1);
4921
gL1CacheEnabled = false;
4922
4923
gr = getgrnam(name);
4924
4925
si_search_module_set_flags("ds", 0);
4926
}
4927
4928
if (gr == NULL) {
4929
gr = getgrnam(name);
4930
}
4931
4932
return gr;
4933
#else
4934
#pragma unused (j)
4935
return getgrnam(name);
4936
#endif
4937
}
4938
4939
void
job_postfork_test_user(job_t j)
{
	// This function is all about 5201578
	// Post-fork sanity check for non-root launchd children: verify that the
	// HOME/USER/LOGNAME environment and the real UID/GID still agree with
	// the user database. Currently only warns on mismatch (see out_bad).

	const char *home_env_var = getenv("HOME");
	const char *user_env_var = getenv("USER");
	const char *logname_env_var = getenv("LOGNAME");
	uid_t tmp_uid, local_uid = getuid();
	gid_t tmp_gid, local_gid = getgid();
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	char loginname[2000];
	struct passwd *pwe;


	if (!job_assumes(j, home_env_var && user_env_var && logname_env_var
					 && strcmp(user_env_var, logname_env_var) == 0)) {
		goto out_bad;
	}

	if ((pwe = job_getpwnam(j, user_env_var)) == NULL) {
		job_log(j, LOG_ERR, "The account \"%s\" has been deleted out from under us!", user_env_var);
		goto out_bad;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	tmp_uid = pwe->pw_uid;
	tmp_gid = pwe->pw_gid;

	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	// NOTE(review): this compares pw_name to $LOGNAME but reports "USER";
	// harmless since USER == LOGNAME was asserted above, but worth noting.
	if (strcmp(loginname, logname_env_var) != 0) {
		job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "USER");
		goto out_bad;
	}
	if (strcmp(homedir, home_env_var) != 0) {
		job_log(j, LOG_ERR, "The %s environmental variable changed out from under us!", "HOME");
		goto out_bad;
	}
	if (local_uid != tmp_uid) {
		job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
				'U', tmp_uid, local_uid);
		goto out_bad;
	}
	if (local_gid != tmp_gid) {
		job_log(j, LOG_ERR, "The %cID of the account (%u) changed out from under us (%u)!",
				'G', tmp_gid, local_gid);
		goto out_bad;
	}

	return;
out_bad:
#if 0
	(void)job_assumes_zero_p(j, kill2(getppid(), SIGTERM));
	_exit(EXIT_FAILURE);
#else
	job_log(j, LOG_WARNING, "In a future build of the OS, this error will be fatal.");
#endif
}
5008
5009
void
job_postfork_become_user(job_t j)
{
	// Runs in the forked child before exec: drop privileges to the job's
	// configured user/group. The ordering below (setlogin, setgid,
	// initgroups, setuid — UID last) is deliberate; setuid() must come
	// after the group operations, which require privilege.
	char loginname[2000];
	char tmpdirpath[PATH_MAX];
	char shellpath[PATH_MAX];
	char homedir[PATH_MAX];
	struct passwd *pwe;
	size_t r;
	gid_t desired_gid = -1;
	uid_t desired_uid = -1;

	// Not root: nothing to drop; just sanity-check the environment.
	if (getuid() != 0) {
		return job_postfork_test_user(j);
	}

	/*
	 * I contend that having UID == 0 and GID != 0 is of dubious value.
	 * Nevertheless, this used to work in Tiger. See: 5425348
	 */
	if (j->groupname && !j->username) {
		j->username = "root";
	}

	if (j->username) {
		if ((pwe = job_getpwnam(j, j->username)) == NULL) {
			job_log(j, LOG_ERR, "getpwnam(\"%s\") failed", j->username);
			_exit(ESRCH);
		}
	} else if (j->mach_uid) {
		if ((pwe = getpwuid(j->mach_uid)) == NULL) {
			job_log(j, LOG_ERR, "getpwuid(\"%u\") failed", j->mach_uid);
			job_log_pids_with_weird_uids(j);
			_exit(ESRCH);
		}
	} else {
		// No user configured: stay root.
		return;
	}

	/*
	 * We must copy the results of getpw*().
	 *
	 * Why? Because subsequent API calls may call getpw*() as a part of
	 * their implementation. Since getpw*() returns a [now thread scoped]
	 * global, we must therefore cache the results before continuing.
	 */

	desired_uid = pwe->pw_uid;
	desired_gid = pwe->pw_gid;

	strlcpy(shellpath, pwe->pw_shell, sizeof(shellpath));
	strlcpy(loginname, pwe->pw_name, sizeof(loginname));
	strlcpy(homedir, pwe->pw_dir, sizeof(homedir));

	if (unlikely(pwe->pw_expire && time(NULL) >= pwe->pw_expire)) {
		job_log(j, LOG_ERR, "Expired account");
		_exit(EXIT_FAILURE);
	}


	if (unlikely(j->username && strcmp(j->username, loginname) != 0)) {
		job_log(j, LOG_WARNING, "Suspicious setup: User \"%s\" maps to user: %s", j->username, loginname);
	} else if (unlikely(j->mach_uid && (j->mach_uid != desired_uid))) {
		job_log(j, LOG_WARNING, "Suspicious setup: UID %u maps to UID %u", j->mach_uid, desired_uid);
	}

	// An explicit GroupName overrides the account's primary group.
	if (j->groupname) {
		struct group *gre;

		if (unlikely((gre = job_getgrnam(j, j->groupname)) == NULL)) {
			job_log(j, LOG_ERR, "getgrnam(\"%s\") failed", j->groupname);
			_exit(ESRCH);
		}

		desired_gid = gre->gr_gid;
	}

	if (job_assumes_zero_p(j, setlogin(loginname)) == -1) {
		_exit(EXIT_FAILURE);
	}

	if (job_assumes_zero_p(j, setgid(desired_gid)) == -1) {
		_exit(EXIT_FAILURE);
	}

	/*
	 * The kernel team and the DirectoryServices team want initgroups()
	 * called after setgid(). See 4616864 for more information.
	 */

	if (likely(!j->no_init_groups)) {
#if 1
		if (job_assumes_zero_p(j, initgroups(loginname, desired_gid)) == -1) {
			_exit(EXIT_FAILURE);
		}
#else
		/* Do our own little initgroups(). We do this to guarantee that we're
		 * always opted into dynamic group resolution in the kernel. initgroups(3)
		 * does not make this guarantee.
		 */
		int groups[NGROUPS], ngroups;

		// A failure here isn't fatal, and we'll still get data we can use.
		(void)job_assumes_zero_p(j, getgrouplist(j->username, desired_gid, groups, &ngroups));

		if (job_assumes_zero_p(j, syscall(SYS_initgroups, ngroups, groups, desired_uid)) == -1) {
			_exit(EXIT_FAILURE);
		}
#endif
	}

	// Dropping the UID must come last — it is irreversible.
	if (job_assumes_zero_p(j, setuid(desired_uid)) == -1) {
		_exit(EXIT_FAILURE);
	}

	r = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdirpath, sizeof(tmpdirpath));

	if (likely(r > 0 && r < sizeof(tmpdirpath))) {
		setenv("TMPDIR", tmpdirpath, 0);
	}

	// overwrite=0: don't clobber values the job's plist already set.
	setenv("SHELL", shellpath, 0);
	setenv("HOME", homedir, 0);
	setenv("USER", loginname, 0);
	setenv("LOGNAME", loginname, 0);
}
5135
5136
void
5137
job_setup_attributes(job_t j)
5138
{
5139
struct limititem *li;
5140
struct envitem *ei;
5141
5142
if (unlikely(j->setnice)) {
5143
(void)job_assumes_zero_p(j, setpriority(PRIO_PROCESS, 0, j->nice));
5144
}
5145
5146
SLIST_FOREACH(li, &j->limits, sle) {
5147
struct rlimit rl;
5148
5149
if (job_assumes_zero_p(j, getrlimit(li->which, &rl) == -1)) {
5150
continue;
5151
}
5152
5153
if (li->sethard) {
5154
rl.rlim_max = li->lim.rlim_max;
5155
}
5156
if (li->setsoft) {
5157
rl.rlim_cur = li->lim.rlim_cur;
5158
}
5159
5160
if (setrlimit(li->which, &rl) == -1) {
5161
job_log_error(j, LOG_WARNING, "setrlimit()");
5162
}
5163
}
5164
5165
if (unlikely(!j->inetcompat && j->session_create)) {
5166
launchd_SessionCreate();
5167
}
5168
5169
if (unlikely(j->low_pri_io)) {
5170
(void)job_assumes_zero_p(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, IOPOL_THROTTLE));
5171
}
5172
if (j->low_priority_background_io) {
5173
(void)job_assumes_zero_p(j, setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_DARWIN_BG, IOPOL_THROTTLE));
5174
}
5175
if (unlikely(j->rootdir)) {
5176
(void)job_assumes_zero_p(j, chroot(j->rootdir));
5177
(void)job_assumes_zero_p(j, chdir("."));
5178
}
5179
5180
job_postfork_become_user(j);
5181
5182
if (unlikely(j->workingdir)) {
5183
if (chdir(j->workingdir) == -1) {
5184
if (errno == ENOENT || errno == ENOTDIR) {
5185
job_log(j, LOG_ERR, "Job specified non-existent working directory: %s", j->workingdir);
5186
} else {
5187
(void)job_assumes_zero(j, errno);
5188
}
5189
}
5190
}
5191
5192
if (unlikely(j->setmask)) {
5193
umask(j->mask);
5194
}
5195
5196
if (j->stdin_fd) {
5197
(void)job_assumes_zero_p(j, dup2(j->stdin_fd, STDIN_FILENO));
5198
} else {
5199
job_setup_fd(j, STDIN_FILENO, j->stdinpath, O_RDONLY|O_CREAT);
5200
}
5201
job_setup_fd(j, STDOUT_FILENO, j->stdoutpath, O_WRONLY|O_CREAT|O_APPEND);
5202
job_setup_fd(j, STDERR_FILENO, j->stderrpath, O_WRONLY|O_CREAT|O_APPEND);
5203
5204
jobmgr_setup_env_from_other_jobs(j->mgr);
5205
5206
SLIST_FOREACH(ei, &j->env, sle) {
5207
setenv(ei->key, ei->value, 1);
5208
}
5209
5210
#if !TARGET_OS_EMBEDDED
5211
if (j->jetsam_properties) {
5212
(void)job_assumes_zero(j, proc_setpcontrol(PROC_SETPC_TERMINATE));
5213
}
5214
#endif
5215
5216
#if TARGET_OS_EMBEDDED
5217
if (j->main_thread_priority != 0) {
5218
struct sched_param params;
5219
bzero(&params, sizeof(params));
5220
params.sched_priority = j->main_thread_priority;
5221
(void)job_assumes_zero_p(j, pthread_setschedparam(pthread_self(), SCHED_OTHER, &params));
5222
}
5223
#endif
5224
5225
/*
5226
* We'd like to call setsid() unconditionally, but we have reason to
5227
* believe that prevents launchd from being able to send signals to
5228
* setuid children. We'll settle for process-groups.
5229
*/
5230
if (getppid() != 1) {
5231
(void)job_assumes_zero_p(j, setpgid(0, 0));
5232
} else {
5233
(void)job_assumes_zero_p(j, setsid());
5234
}
5235
}
5236
5237
void
5238
job_setup_fd(job_t j, int target_fd, const char *path, int flags)
5239
{
5240
int fd;
5241
5242
if (!path) {
5243
return;
5244
}
5245
5246
if ((fd = open(path, flags|O_NOCTTY, DEFFILEMODE)) == -1) {
5247
job_log_error(j, LOG_WARNING, "open(\"%s\", ...)", path);
5248
return;
5249
}
5250
5251
(void)job_assumes_zero_p(j, dup2(fd, target_fd));
5252
(void)job_assumes_zero(j, runtime_close(fd));
5253
}
5254
5255
void
calendarinterval_setalarm(job_t j, struct calendarinterval *ci)
{
	struct calendarinterval *ci_iter, *ci_prev = NULL;
	time_t later, head_later;

	/* Compute the next fire time from the cron-style month/mday/hour/min
	 * fields (a -1 field acts as a wildcard — see the dict-walk parser). */
	later = cronemu(ci->when.tm_mon, ci->when.tm_mday, ci->when.tm_hour, ci->when.tm_min);

	if (ci->when.tm_wday != -1) {
		time_t otherlater = cronemu_wday(ci->when.tm_wday, ci->when.tm_hour, ci->when.tm_min);

		/* cron semantics: with both weekday and day-of-month given,
		 * fire at whichever comes first; weekday alone wins outright. */
		if (ci->when.tm_mday == -1) {
			later = otherlater;
		} else {
			later = later < otherlater ? later : otherlater;
		}
	}

	ci->when_next = later;

	/* Keep sorted_calendar_events ordered by when_next: insert before
	 * the first entry that fires later than this one. */
	LIST_FOREACH(ci_iter, &sorted_calendar_events, global_sle) {
		if (ci->when_next < ci_iter->when_next) {
			LIST_INSERT_BEFORE(ci_iter, ci, global_sle);
			break;
		}

		ci_prev = ci_iter;
	}

	if (ci_iter == NULL) {
		// ci must want to fire after every other timer, or there are no timers

		if (LIST_EMPTY(&sorted_calendar_events)) {
			LIST_INSERT_HEAD(&sorted_calendar_events, ci, global_sle);
		} else {
			LIST_INSERT_AFTER(ci_prev, ci, global_sle);
		}
	}

	/* Re-arm the single shared absolute timer for the earliest deadline
	 * in the (possibly changed) list. */
	head_later = LIST_FIRST(&sorted_calendar_events)->when_next;

	if (job_assumes_zero_p(j, kevent_mod((uintptr_t)&sorted_calendar_events, EVFILT_TIMER, EV_ADD, NOTE_ABSOLUTE|NOTE_SECONDS, head_later, root_jobmgr)) != -1) {
		char time_string[100];
		size_t time_string_len;

		/* ctime_r() appends a trailing newline; strip it for logging. */
		ctime_r(&later, time_string);
		time_string_len = strlen(time_string);

		if (likely(time_string_len && time_string[time_string_len - 1] == '\n')) {
			time_string[time_string_len - 1] = '\0';
		}

		job_log(j, LOG_INFO, "Scheduled to run again at %s", time_string);
	}
}
5310
5311
bool
5312
jobmgr_log_bug(_SIMPLE_STRING asl_message __attribute__((unused)), void *ctx, const char *message)
5313
{
5314
jobmgr_t jm = ctx;
5315
jobmgr_log(jm, LOG_ERR, "%s", message);
5316
5317
return true;
5318
}
5319
5320
bool
5321
job_log_bug(_SIMPLE_STRING asl_message __attribute__((unused)), void *ctx, const char *message)
5322
{
5323
job_t j = ctx;
5324
job_log(j, LOG_ERR, "%s", message);
5325
5326
return true;
5327
}
5328
5329
// ri: NULL = please sample j->p; non-NULL = use this sample
void
job_log_perf_statistics(job_t j, struct rusage_info_v1 *ri, int64_t exit_status)
{
#if HAVE_SYSTEMSTATS
	/* Nothing to report for anonymous jobs or jobs without a live pid. */
	if (j->anonymous || !j->p) {
		return;
	}
	if (!systemstats_is_enabled()) {
		return;
	}
	/* Prefer the bundle identifier as the reported name; fall back to
	 * the job label. */
	const char *name;
	if (j->cfbundleidentifier) {
		name = j->cfbundleidentifier;
	} else {
		name = j->label;
	}
	int r = 0;
	struct rusage_info_v1 ris;
	/* Caller didn't provide a sample: take one from the live process. */
	if (ri == NULL) {
		ri = &ris;
		r = proc_pid_rusage(j->p, RUSAGE_INFO_V1, (rusage_info_t)ri);
	}
	/* Sampling failed (r is only ever set by the branch above). */
	if (r == -1) {
		return;
	}
	job_log_systemstats(j->p, j->uniqueid, runtime_get_uniqueid(), j->mgr->req_pid, j->mgr->req_uniqueid, name, ri, exit_status);
#else
#pragma unused (j, ri, exit_status)
#endif
}
5360
5361
#if HAVE_SYSTEMSTATS
5362
// ri: NULL = don't write fields from ri; non-NULL = use this sample
5363
static
5364
void
5365
job_log_systemstats(pid_t pid, uint64_t uniqueid, uint64_t parent_uniqueid, pid_t req_pid, uint64_t req_uniqueid, const char *name, struct rusage_info_v1 *ri, int64_t exit_status)
5366
{
5367
if (!systemstats_is_enabled()) {
5368
return;
5369
}
5370
5371
struct systemstats_process_usage_s info;
5372
bzero(&info, sizeof(info));
5373
info.name = name;
5374
info.pid = pid;
5375
info.exit_status = exit_status;
5376
info.uid = getuid();
5377
info.ppid = getpid();
5378
info.responsible_pid = req_pid;
5379
5380
if (likely(ri)) {
5381
info.macho_uuid = (const uint8_t *)&ri->ri_uuid;
5382
info.user_time = ri->ri_user_time;
5383
info.system_time = ri->ri_system_time;
5384
info.pkg_idle_wkups = ri->ri_pkg_idle_wkups;
5385
info.interrupt_wkups = ri->ri_interrupt_wkups;
5386
info.proc_start_abstime = ri->ri_proc_start_abstime;
5387
info.proc_exit_abstime = ri->ri_proc_exit_abstime;
5388
#if SYSTEMSTATS_API_VERSION >= 20130319
5389
info.pageins = ri->ri_pageins;
5390
info.wired_size = ri->ri_wired_size;
5391
info.resident_size = ri->ri_resident_size;
5392
info.phys_footprint = ri->ri_phys_footprint;
5393
// info.purgeablesize = ???
5394
#endif
5395
#if SYSTEMSTATS_API_VERSION >= 20130328
5396
info.child_user_time = ri->ri_child_user_time;
5397
info.child_system_time = ri->ri_child_system_time;
5398
info.child_pkg_idle_wkups = ri->ri_child_pkg_idle_wkups;
5399
info.child_interrupt_wkups = ri->ri_child_interrupt_wkups;
5400
info.child_pageins = ri->ri_child_pageins;
5401
info.child_elapsed_abstime = ri->ri_child_elapsed_abstime;
5402
#endif
5403
}
5404
#if SYSTEMSTATS_API_VERSION >= 20130410
5405
info.uniqueid = uniqueid;
5406
info.parent_uniqueid = parent_uniqueid;
5407
info.responsible_uniqueid = req_uniqueid;
5408
#endif
5409
systemstats_write_process_usage(&info);
5410
}
5411
#endif /* HAVE_SYSTEMSTATS */
5412
5413
struct waiting4attach *
5414
waiting4attach_new(jobmgr_t jm, const char *name, mach_port_t port, pid_t dest, xpc_service_type_t type)
5415
{
5416
size_t xtra = strlen(name) + 1;
5417
5418
struct waiting4attach *w4a = malloc(sizeof(*w4a) + xtra);
5419
if (!w4a) {
5420
return NULL;
5421
}
5422
5423
w4a->port = port;
5424
w4a->dest = dest;
5425
w4a->type = type;
5426
(void)strcpy(w4a->name, name);
5427
5428
if (dest) {
5429
LIST_INSERT_HEAD(&_launchd_domain_waiters, w4a, le);
5430
} else {
5431
LIST_INSERT_HEAD(&jm->attaches, w4a, le);
5432
}
5433
5434
5435
(void)jobmgr_assumes_zero(jm, launchd_mport_notify_req(port, MACH_NOTIFY_DEAD_NAME));
5436
return w4a;
5437
}
5438
5439
void
waiting4attach_delete(jobmgr_t jm, struct waiting4attach *w4a)
{
	jobmgr_log(jm, LOG_DEBUG, "Canceling dead-name notification for waiter port: 0x%x", w4a->port);

	LIST_REMOVE(w4a, le);

	/* Cancel the dead-name notification armed in waiting4attach_new()
	 * by registering MACH_PORT_NULL in its place; the displaced
	 * send-once right (if any) must then be released. */
	mach_port_t previous = MACH_PORT_NULL;
	(void)jobmgr_assumes_zero(jm, mach_port_request_notification(mach_task_self(), w4a->port, MACH_NOTIFY_DEAD_NAME, 0, MACH_PORT_NULL, MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous));
	if (previous) {
		(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(previous));
	}

	/* Drop our reference on the waiter port itself, then the record. */
	jobmgr_assumes_zero(jm, launchd_mport_deallocate(w4a->port));
	free(w4a);
}
5455
5456
struct waiting4attach *
5457
waiting4attach_find(jobmgr_t jm, job_t j)
5458
{
5459
char *name2use = (char *)j->label;
5460
if (j->app) {
5461
struct envitem *ei = NULL;
5462
SLIST_FOREACH(ei, &j->env, sle) {
5463
if (strcmp(ei->key, XPC_SERVICE_RENDEZVOUS_TOKEN) == 0) {
5464
name2use = ei->value;
5465
break;
5466
}
5467
}
5468
}
5469
5470
struct waiting4attach *w4ai = NULL;
5471
LIST_FOREACH(w4ai, &jm->attaches, le) {
5472
if (strcmp(name2use, w4ai->name) == 0) {
5473
job_log(j, LOG_DEBUG, "Found attachment: %s", name2use);
5474
break;
5475
}
5476
}
5477
5478
return w4ai;
5479
}
5480
5481
/* Core logging routine for jobs: builds the final message text (with
 * optional errno suffix) and hands it to launchd_vsyslog() with
 * attribution for the job. A NULL j falls back to placeholder names. */
void
job_logv(job_t j, int pri, int err, const char *msg, va_list ap)
{
	const char *label2use = j ? j->label : "com.apple.launchd.job-unknown";
	const char *mgr2use = j ? j->mgr->name : "com.apple.launchd.jobmanager-unknown";
	char *newmsg;
	int oldmask = 0;
	size_t newmsgsz;

	struct launchd_syslog_attr attr = {
		.from_name = launchd_label,
		.about_name = label2use,
		.session_name = mgr2use,
		.priority = pri,
		.from_uid = getuid(),
		.from_pid = getpid(),
		.about_pid = j ? j->p : 0,
	};

	/* Hack: If bootstrap_port is set, we must be on the child side of a
	 * fork(2), but before the exec*(3). Let's route the log message back to
	 * launchd proper.
	 */
	if (bootstrap_port) {
		return _vproc_logv(pri, err, msg, ap);
	}

	/* 200 extra bytes leave room for the errno/strerror() decoration. */
	newmsgsz = strlen(msg) + 200;
	newmsg = alloca(newmsgsz);

	if (err) {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s: %d: %s", msg, err, strerror(err));
#else
		snprintf(newmsg, newmsgsz, "(%s) %s: %d: %s", label2use, msg, err, strerror(err));
#endif
	} else {
#if !TARGET_OS_EMBEDDED
		snprintf(newmsg, newmsgsz, "%s", msg);
#else
		snprintf(newmsg, newmsgsz, "(%s) %s", label2use, msg);
#endif
	}

	/* Debug-flagged jobs log at full verbosity for this call only. */
	if (j && unlikely(j->debug)) {
		oldmask = setlogmask(LOG_UPTO(LOG_DEBUG));
	}

	launchd_vsyslog(&attr, newmsg, ap);

	if (j && unlikely(j->debug)) {
		setlogmask(oldmask);
	}
}
5535
5536
void
5537
job_log_error(job_t j, int pri, const char *msg, ...)
5538
{
5539
va_list ap;
5540
5541
va_start(ap, msg);
5542
job_logv(j, pri, errno, msg, ap);
5543
va_end(ap);
5544
}
5545
5546
void
5547
job_log(job_t j, int pri, const char *msg, ...)
5548
{
5549
va_list ap;
5550
5551
va_start(ap, msg);
5552
job_logv(j, pri, 0, msg, ap);
5553
va_end(ap);
5554
}
5555
5556
#if 0
5557
void
5558
jobmgr_log_error(jobmgr_t jm, int pri, const char *msg, ...)
5559
{
5560
va_list ap;
5561
5562
va_start(ap, msg);
5563
jobmgr_logv(jm, pri, errno, msg, ap);
5564
va_end(ap);
5565
}
5566
#endif
5567
5568
/* Recursively dump performance statistics for this manager, its
 * sub-managers, and all of their jobs. When signal_children is set,
 * per-user launchd instances are poked with SIGINFO so they dump their
 * own statistics too. */
void
jobmgr_log_perf_statistics(jobmgr_t jm, bool signal_children)
{
#if HAVE_SYSTEMSTATS
	// Log information for kernel_task and pid 1 launchd.
	if (systemstats_is_enabled() && pid1_magic && jm == root_jobmgr) {
#if SYSTEMSTATS_API_VERSION >= 20130328
		if (_systemstats_get_property(SYSTEMSTATS_API_VERSION, SYSTEMSTATS_WRITER_launchd, SYSTEMSTATS_PROPERTY_SHOULD_LOG_ENERGY_STATISTICS)) {
			systemstats_write_intel_energy_statistics(NULL);
		}
#else
		systemstats_write_intel_energy_statistics(NULL);
#endif
		job_log_systemstats(0, 0, 0, 0, 0, "com.apple.kernel", NULL, -1);
		job_log_systemstats(1, 1, 0, 1, 1, "com.apple.launchd", NULL, -1);
	}
#endif
	/* Depth-first over the sub-manager tree. */
	jobmgr_t jmi = NULL;
	SLIST_FOREACH(jmi, &jm->submgrs, sle) {
		jobmgr_log_perf_statistics(jmi, signal_children);
	}

	/* Describe what kind of domain this manager is. */
	if (jm->xpc_singleton) {
		jobmgr_log(jm, LOG_PERF, "XPC Singleton Domain: %s", jm->shortdesc);
	} else if (jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
		jobmgr_log(jm, LOG_PERF, "XPC Private Domain: %s", jm->owner);
	} else if (jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) {
		jobmgr_log(jm, LOG_PERF, "Created via bootstrap_subset()");
	}

	jobmgr_log(jm, LOG_PERF, "Jobs in job manager:");

	job_t ji = NULL;
	LIST_FOREACH(ji, &jm->jobs, sle) {
		job_log_perf_statistics(ji, NULL, -1);
		/* Per-user launchds are identified by their label prefix. */
		if (unlikely(signal_children) && unlikely(strstr(ji->label, "com.apple.launchd.peruser.") == ji->label)) {
			jobmgr_log(jm, LOG_PERF, "Sending SIGINFO to peruser launchd %d", ji->p);
			kill(ji->p, SIGINFO);
		}
	}

	jobmgr_log(jm, LOG_PERF, "End of job list.");
}
5611
5612
void
5613
jobmgr_log(jobmgr_t jm, int pri, const char *msg, ...)
5614
{
5615
va_list ap;
5616
5617
va_start(ap, msg);
5618
jobmgr_logv(jm, pri, 0, msg, ap);
5619
va_end(ap);
5620
}
5621
5622
/* Core logging for job managers: prefixes the message with the manager
 * name and walks up the parent chain so the root manager emits the
 * fully-qualified message. */
void
jobmgr_logv(jobmgr_t jm, int pri, int err, const char *msg, va_list ap)
{
	if (!jm) {
		jm = root_jobmgr;
	}

	char *newmsg;
	char *newname;
	size_t i, o, jmname_len = strlen(jm->name), newmsgsz;

	/* Worst case: every character of the name is '%' and gets doubled. */
	newname = alloca((jmname_len + 1) * 2);
	newmsgsz = (jmname_len + 1) * 2 + strlen(msg) + 100;
	newmsg = alloca(newmsgsz);

	/* Escape '%' in the manager name: the combined string is reused as a
	 * format string by the recursive call / launchd_vsyslog() below. */
	for (i = 0, o = 0; i < jmname_len; i++, o++) {
		if (jm->name[i] == '%') {
			newname[o] = '%';
			o++;
		}
		newname[o] = jm->name[i];
	}
	newname[o] = '\0';

	if (err) {
		snprintf(newmsg, newmsgsz, "%s: %s: %s", newname, msg, strerror(err));
	} else {
		snprintf(newmsg, newmsgsz, "%s: %s", newname, msg);
	}

	if (jm->parentmgr) {
		/* errno text (if any) is already baked in; pass 0 upward. */
		jobmgr_logv(jm->parentmgr, pri, 0, newmsg, ap);
	} else {
		struct launchd_syslog_attr attr = {
			.from_name = launchd_label,
			.about_name = launchd_label,
			.session_name = jm->name,
			.priority = pri,
			.from_uid = getuid(),
			.from_pid = getpid(),
			.about_pid = getpid(),
		};

		launchd_vsyslog(&attr, newmsg, ap);
	}
}
5668
5669
/* Context for calendarinterval_new_from_obj_dict_walk(): the job being
 * configured plus the struct tm assembled field-by-field. The walker
 * sets tmptm.tm_sec to -1 to signal a parse/validation error. */
struct cal_dict_walk {
	job_t j;
	struct tm tmptm;
};
5673
5674
/* launch_data_dict_iterate() callback: validate one StartCalendarInterval
 * key/value pair and store it into the struct tm being built. On any
 * invalid value, tm_sec is set to -1 so the caller can detect failure. */
void
calendarinterval_new_from_obj_dict_walk(launch_data_t obj, const char *key, void *context)
{
	struct cal_dict_walk *cdw = context;
	struct tm *tmptm = &cdw->tmptm;
	job_t j = cdw->j;
	int64_t val;

	if (unlikely(LAUNCH_DATA_INTEGER != launch_data_get_type(obj))) {
		// hack to let caller know something went wrong
		tmptm->tm_sec = -1;
		return;
	}

	val = launch_data_get_integer(obj);

	if (val < 0) {
		job_log(j, LOG_WARNING, "The interval for key \"%s\" is less than zero.", key);
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MINUTE) == 0) {
		if (val > 59) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 59 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_min = (typeof(tmptm->tm_min)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_HOUR) == 0) {
		if (val > 23) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 23 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_hour = (typeof(tmptm->tm_hour)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_DAY) == 0) {
		if (val < 1 || val > 31) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 1 and 31 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_mday = (typeof(tmptm->tm_mday)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_WEEKDAY) == 0) {
		/* 0 through 7 accepted (cron allows both 0 and 7 for Sunday). */
		if (val > 7) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 7 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_wday = (typeof(tmptm->tm_wday)) val;
		}
	} else if (strcasecmp(key, LAUNCH_JOBKEY_CAL_MONTH) == 0) {
		if (val > 12) {
			job_log(j, LOG_WARNING, "The interval for key \"%s\" is not between 0 and 12 (inclusive).", key);
			tmptm->tm_sec = -1;
		} else {
			tmptm->tm_mon = (typeof(tmptm->tm_mon)) val;
			tmptm->tm_mon -= 1; // 4798263 cron compatibility
		}
	}
}
5730
5731
bool
5732
calendarinterval_new_from_obj(job_t j, launch_data_t obj)
5733
{
5734
struct cal_dict_walk cdw;
5735
5736
cdw.j = j;
5737
memset(&cdw.tmptm, 0, sizeof(0));
5738
5739
cdw.tmptm.tm_min = -1;
5740
cdw.tmptm.tm_hour = -1;
5741
cdw.tmptm.tm_mday = -1;
5742
cdw.tmptm.tm_wday = -1;
5743
cdw.tmptm.tm_mon = -1;
5744
5745
if (!job_assumes(j, obj != NULL)) {
5746
return false;
5747
}
5748
5749
if (unlikely(LAUNCH_DATA_DICTIONARY != launch_data_get_type(obj))) {
5750
return false;
5751
}
5752
5753
launch_data_dict_iterate(obj, calendarinterval_new_from_obj_dict_walk, &cdw);
5754
5755
if (unlikely(cdw.tmptm.tm_sec == -1)) {
5756
return false;
5757
}
5758
5759
return calendarinterval_new(j, &cdw.tmptm);
5760
}
5761
5762
bool
5763
calendarinterval_new(job_t j, struct tm *w)
5764
{
5765
struct calendarinterval *ci = calloc(1, sizeof(struct calendarinterval));
5766
5767
if (!job_assumes(j, ci != NULL)) {
5768
return false;
5769
}
5770
5771
ci->when = *w;
5772
ci->job = j;
5773
5774
SLIST_INSERT_HEAD(&j->cal_intervals, ci, sle);
5775
5776
calendarinterval_setalarm(j, ci);
5777
5778
runtime_add_weak_ref();
5779
5780
return true;
5781
}
5782
5783
void
5784
calendarinterval_delete(job_t j, struct calendarinterval *ci)
5785
{
5786
SLIST_REMOVE(&j->cal_intervals, ci, calendarinterval, sle);
5787
LIST_REMOVE(ci, global_sle);
5788
5789
free(ci);
5790
5791
runtime_del_weak_ref();
5792
}
5793
5794
void
5795
calendarinterval_sanity_check(void)
5796
{
5797
struct calendarinterval *ci = LIST_FIRST(&sorted_calendar_events);
5798
time_t now = time(NULL);
5799
5800
if (unlikely(ci && (ci->when_next < now))) {
5801
(void)jobmgr_assumes_zero_p(root_jobmgr, raise(SIGUSR1));
5802
}
5803
}
5804
5805
/* Shared calendar timer fired: dispatch every job whose deadline has
 * passed. The list is sorted by when_next, so iteration stops at the
 * first future deadline. */
void
calendarinterval_callback(void)
{
	struct calendarinterval *ci, *ci_next;
	time_t now = time(NULL);

	LIST_FOREACH_SAFE(ci, &sorted_calendar_events, global_sle, ci_next) {
		job_t j = ci->job;

		if (ci->when_next > now) {
			break;
		}

		/* Remove and re-arm: calendarinterval_setalarm() computes the
		 * next deadline and re-inserts ci in sorted order. */
		LIST_REMOVE(ci, global_sle);
		calendarinterval_setalarm(j, ci);

		j->start_pending = true;
		job_dispatch(j, false);
	}
}
5825
5826
bool
5827
socketgroup_new(job_t j, const char *name, int *fds, size_t fd_cnt)
5828
{
5829
struct socketgroup *sg = calloc(1, sizeof(struct socketgroup) + strlen(name) + 1);
5830
5831
if (!job_assumes(j, sg != NULL)) {
5832
return false;
5833
}
5834
5835
sg->fds = calloc(1, fd_cnt * sizeof(int));
5836
sg->fd_cnt = fd_cnt;
5837
5838
if (!job_assumes(j, sg->fds != NULL)) {
5839
free(sg);
5840
return false;
5841
}
5842
5843
memcpy(sg->fds, fds, fd_cnt * sizeof(int));
5844
strcpy(sg->name_init, name);
5845
5846
SLIST_INSERT_HEAD(&j->sockets, sg, sle);
5847
5848
runtime_add_weak_ref();
5849
5850
return true;
5851
}
5852
5853
/* Close every descriptor in the group, unlink it from the job, and
 * release its storage. */
void
socketgroup_delete(job_t j, struct socketgroup *sg)
{
	unsigned int i;

	for (i = 0; i < sg->fd_cnt; i++) {
#if 0
		/* Disabled: unlink AF_UNIX socket paths on teardown. */
		struct sockaddr_storage ss;
		struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
		socklen_t ss_len = sizeof(ss);

		// 5480306
		if (job_assumes_zero(j, getsockname(sg->fds[i], (struct sockaddr *)&ss, &ss_len) != -1)
				&& job_assumes(j, ss_len > 0) && (ss.ss_family == AF_UNIX)) {
			(void)job_assumes(j, unlink(sun->sun_path) != -1);
			// We might conditionally need to delete a directory here
		}
#endif
		(void)job_assumes_zero_p(j, runtime_close(sg->fds[i]));
	}

	SLIST_REMOVE(&j->sockets, sg, socketgroup, sle);

	free(sg->fds);
	free(sg);

	/* Balance the weak ref taken in socketgroup_new(). */
	runtime_del_weak_ref();
}
5881
5882
/* Add or delete EVFILT_READ kevents for every descriptor in the group
 * in one bulk call, logging the fd list at debug level. */
void
socketgroup_kevent_mod(job_t j, struct socketgroup *sg, bool do_add)
{
	struct kevent kev[sg->fd_cnt];
	char buf[10000];
	unsigned int i;
	size_t buf_off = 0;

	for (i = 0; i < sg->fd_cnt; i++) {
		EV_SET(&kev[i], sg->fds[i], EVFILT_READ, do_add ? EV_ADD : EV_DELETE, 0, 0, j);
		/* Fix: snprintf() returns the length that WOULD have been
		 * written. The original code accumulated that unchecked, so once
		 * the buffer filled, `sizeof(buf) - buf_off` underflowed and a
		 * huge size was passed to snprintf(). Clamp on truncation. */
		if (buf_off < sizeof(buf) - 1) {
			int n = snprintf(buf + buf_off, sizeof(buf) - buf_off, " %d", sg->fds[i]);
			if (n > 0) {
				buf_off += (size_t)n;
				if (buf_off > sizeof(buf) - 1) {
					buf_off = sizeof(buf) - 1;
				}
			}
		}
	}

	job_log(j, LOG_DEBUG, "%s Sockets:%s", do_add ? "Watching" : "Ignoring", buf);

	(void)job_assumes_zero_p(j, kevent_bulk_mod(kev, sg->fd_cnt));

	/* kevent_bulk_mod() reports per-entry results in-place; surface any
	 * per-descriptor error. */
	for (i = 0; i < sg->fd_cnt; i++) {
		(void)job_assumes(j, kev[i].flags & EV_ERROR);
		errno = (typeof(errno)) kev[i].data;
		(void)job_assumes_zero(j, kev[i].data);
	}
}
5904
5905
/* Delete the EVFILT_READ kevents for all descriptors in sg. */
void
socketgroup_ignore(job_t j, struct socketgroup *sg)
{
	socketgroup_kevent_mod(j, sg, false);
}
5910
5911
/* Add EVFILT_READ kevents for all descriptors in sg. */
void
socketgroup_watch(job_t j, struct socketgroup *sg)
{
	socketgroup_kevent_mod(j, sg, true);
}
5916
5917
/* A watched socket became readable: dispatch the job (kickstart=true). */
void
socketgroup_callback(job_t j)
{
	job_dispatch(j, true);
}
5922
5923
bool
5924
envitem_new(job_t j, const char *k, const char *v, bool global)
5925
{
5926
if (global && !launchd_allow_global_dyld_envvars) {
5927
if (strncmp("DYLD_", k, sizeof("DYLD_") - 1) == 0) {
5928
job_log(j, LOG_ERR, "Ignoring global environment variable submitted by job (variable=value): %s=%s", k, v);
5929
return false;
5930
}
5931
}
5932
5933
struct envitem *ei = calloc(1, sizeof(struct envitem) + strlen(k) + 1 + strlen(v) + 1);
5934
5935
if (!job_assumes(j, ei != NULL)) {
5936
return false;
5937
}
5938
5939
strcpy(ei->key_init, k);
5940
ei->value = ei->key_init + strlen(k) + 1;
5941
strcpy(ei->value, v);
5942
5943
if (global) {
5944
if (SLIST_EMPTY(&j->global_env)) {
5945
LIST_INSERT_HEAD(&j->mgr->global_env_jobs, j, global_env_sle);
5946
}
5947
SLIST_INSERT_HEAD(&j->global_env, ei, sle);
5948
} else {
5949
SLIST_INSERT_HEAD(&j->env, ei, sle);
5950
}
5951
5952
job_log(j, LOG_DEBUG, "Added environmental variable: %s=%s", k, v);
5953
5954
return true;
5955
}
5956
5957
void
5958
envitem_delete(job_t j, struct envitem *ei, bool global)
5959
{
5960
if (global) {
5961
SLIST_REMOVE(&j->global_env, ei, envitem, sle);
5962
if (SLIST_EMPTY(&j->global_env)) {
5963
LIST_REMOVE(j, global_env_sle);
5964
}
5965
} else {
5966
SLIST_REMOVE(&j->env, ei, envitem, sle);
5967
}
5968
5969
free(ei);
5970
}
5971
5972
void
5973
envitem_setup(launch_data_t obj, const char *key, void *context)
5974
{
5975
job_t j = context;
5976
5977
if (launch_data_get_type(obj) != LAUNCH_DATA_STRING) {
5978
return;
5979
}
5980
5981
if (strncmp(LAUNCHD_TRUSTED_FD_ENV, key, sizeof(LAUNCHD_TRUSTED_FD_ENV) - 1) != 0) {
5982
envitem_new(j, key, launch_data_get_string(obj), j->importing_global_env);
5983
} else {
5984
job_log(j, LOG_DEBUG, "Ignoring reserved environmental variable: %s", key);
5985
}
5986
}
5987
5988
bool
5989
limititem_update(job_t j, int w, rlim_t r)
5990
{
5991
struct limititem *li;
5992
5993
SLIST_FOREACH(li, &j->limits, sle) {
5994
if (li->which == w) {
5995
break;
5996
}
5997
}
5998
5999
if (li == NULL) {
6000
li = calloc(1, sizeof(struct limititem));
6001
6002
if (!job_assumes(j, li != NULL)) {
6003
return false;
6004
}
6005
6006
SLIST_INSERT_HEAD(&j->limits, li, sle);
6007
6008
li->which = w;
6009
}
6010
6011
if (j->importing_hard_limits) {
6012
li->lim.rlim_max = r;
6013
li->sethard = true;
6014
} else {
6015
li->lim.rlim_cur = r;
6016
li->setsoft = true;
6017
}
6018
6019
return true;
6020
}
6021
6022
/* Unlink li from the job's resource-limit list and release it. */
void
limititem_delete(job_t j, struct limititem *li)
{
	SLIST_REMOVE(&j->limits, li, limititem, sle);

	free(li);
}
6029
6030
#if HAVE_SANDBOX
/* launch_data_dict_iterate() callback: translate boolean sandbox flag
 * entries into bits on j->seatbelt_flags. */
void
seatbelt_setup_flags(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;

	if (launch_data_get_type(obj) != LAUNCH_DATA_BOOL) {
		job_log(j, LOG_WARNING, "Sandbox flag value must be boolean: %s", key);
		return;
	}

	/* A false value simply leaves the flag unset. */
	if (!launch_data_get_bool(obj)) {
		return;
	}

	if (strcasecmp(key, LAUNCH_JOBKEY_SANDBOX_NAMED) == 0) {
		j->seatbelt_flags |= SANDBOX_NAMED;
	}
}
#endif
6050
6051
void
6052
limititem_setup(launch_data_t obj, const char *key, void *context)
6053
{
6054
job_t j = context;
6055
size_t i, limits_cnt = (sizeof(launchd_keys2limits) / sizeof(launchd_keys2limits[0]));
6056
rlim_t rl;
6057
6058
if (launch_data_get_type(obj) != LAUNCH_DATA_INTEGER) {
6059
return;
6060
}
6061
6062
rl = launch_data_get_integer(obj);
6063
6064
for (i = 0; i < limits_cnt; i++) {
6065
if (strcasecmp(launchd_keys2limits[i].key, key) == 0) {
6066
break;
6067
}
6068
}
6069
6070
if (i == limits_cnt) {
6071
return;
6072
}
6073
6074
limititem_update(j, launchd_keys2limits[i].val, rl);
6075
}
6076
6077
/* Decide whether an exited job should be garbage-collected rather than
 * kept around for relaunch. Returns true when the job has no further
 * reason to exist. */
bool
job_useless(job_t j)
{
	/* One-shot (or legacy LaunchServices) jobs that have already run are
	 * done — unless the legacy job still holds its privileged port. */
	if ((j->legacy_LS_job || j->only_once) && j->start_time != 0) {
		if (j->legacy_LS_job && j->j_port) {
			return false;
		}
		job_log(j, LOG_INFO, "Exited. Was only configured to run once.");
		return true;
	} else if (j->removal_pending) {
		job_log(j, LOG_DEBUG, "Exited while removal was pending.");
		return true;
	} else if (j->shutdown_monitor) {
		return false;
	} else if (j->mgr->shutting_down && !j->mgr->parentmgr) {
		job_log(j, LOG_DEBUG, "Exited while shutdown in progress. Processes remaining: %lu/%lu", total_children, total_anon_children);
		if (total_children == 0 && !j->anonymous) {
			job_log(j, LOG_DEBUG | LOG_CONSOLE, "Job was last to exit during shutdown of: %s.", j->mgr->name);
		}
		return true;
	} else if (j->legacy_mach_job) {
		/* A legacy Mach job is useless once it serves no Mach services,
		 * or if it never checked in. */
		if (SLIST_EMPTY(&j->machservices)) {
			job_log(j, LOG_INFO, "Garbage collecting");
			return true;
		} else if (!j->checkedin) {
			job_log(j, LOG_WARNING, "Failed to check-in!");
			return true;
		}
	} else {
		/* If the job's executable does not have any valid architectures (for
		 * example, if it's a PowerPC-only job), then we don't even bother
		 * trying to relaunch it, as we have no reasonable expectation that
		 * the situation will change.
		 *
		 * <rdar://problem/9106979>
		 */
		if (!j->did_exec && WEXITSTATUS(j->last_exit_status) == EBADARCH) {
			job_log(j, LOG_ERR, "Job executable does not contain supported architectures. Unloading it. Its plist should be removed.");
			return true;
		}
	}

	return false;
}
6121
6122
/* Evaluate the job's KeepAlive criteria: returns true when the job
 * should be (re)started now even without an explicit demand. */
bool
job_keepalive(job_t j)
{
	mach_msg_type_number_t statusCnt;
	mach_port_status_t status;
	struct semaphoreitem *si;
	struct machservice *ms;
	bool good_exit = (WIFEXITED(j->last_exit_status) && WEXITSTATUS(j->last_exit_status) == 0);
	bool is_not_kextd = (launchd_apple_internal || (strcmp(j->label, "com.apple.kextd") != 0));

	if (unlikely(j->mgr->shutting_down)) {
		return false;
	}

	/*
	 * 5066316
	 *
	 * We definitely need to revisit this after Leopard ships. Please see
	 * launchctl.c for the other half of this hack.
	 */
	if (unlikely((j->mgr->global_on_demand_cnt > 0) && is_not_kextd)) {
		return false;
	}

	if (unlikely(j->needs_kickoff)) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Job needs to be kicked off on-demand before KeepAlive sets in.");
		return false;
	}

	if (j->start_pending) {
		job_log(j, LOG_DEBUG, "KeepAlive check: Pent-up non-IPC launch criteria.");
		return true;
	}

	if (!j->ondemand) {
		job_log(j, LOG_DEBUG, "KeepAlive check: job configured to run continuously.");
		return true;
	}

	/* Pending messages on any advertised Mach service also demand a
	 * relaunch. */
	SLIST_FOREACH(ms, &j->machservices, sle) {
		statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		if (mach_port_get_attributes(mach_task_self(), ms->port, MACH_PORT_RECEIVE_STATUS,
					(mach_port_info_t)&status, &statusCnt) != KERN_SUCCESS) {
			continue;
		}
		if (status.mps_msgcount) {
			job_log(j, LOG_DEBUG, "KeepAlive check: %d queued Mach messages on service: %s",
					status.mps_msgcount, ms->name);
			return true;
		}
	}

	/* TODO: Coalesce external events and semaphore items, since they're basically
	 * the same thing.
	 */
	struct externalevent *ei = NULL;
	LIST_FOREACH(ei, &j->events, job_le) {
		if (ei->state == ei->wanted_state) {
			return true;
		}
	}

	/* Each semaphore pair below shares its body: the *_UP/SUCCESSFUL/
	 * CRASHED/ENABLED/ACTIVE case sets wanted_state = true and then
	 * deliberately falls through into its negative twin. */
	SLIST_FOREACH(si, &j->semaphores, sle) {
		bool wanted_state = false;
		job_t other_j;

		switch (si->why) {
		case NETWORK_UP:
			wanted_state = true;
			/* FALLTHROUGH */
		case NETWORK_DOWN:
			if (network_up == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The network is %s.", wanted_state ? "up" : "down");
				return true;
			}
			break;
		case SUCCESSFUL_EXIT:
			wanted_state = true;
			/* FALLTHROUGH */
		case FAILED_EXIT:
			if (good_exit == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The exit state was %s.", wanted_state ? "successful" : "failure");
				return true;
			}
			break;
		case CRASHED:
			wanted_state = true;
			/* FALLTHROUGH */
		case DID_NOT_CRASH:
			if (j->crashed == wanted_state) {
				return true;
			}
			break;
		case OTHER_JOB_ENABLED:
			wanted_state = true;
			/* FALLTHROUGH */
		case OTHER_JOB_DISABLED:
			if ((bool)job_find(NULL, si->what) == wanted_state) {
				job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "enabled" : "disabled", si->what);
				return true;
			}
			break;
		case OTHER_JOB_ACTIVE:
			wanted_state = true;
			/* FALLTHROUGH */
		case OTHER_JOB_INACTIVE:
			if ((other_j = job_find(NULL, si->what))) {
				if ((bool)other_j->p == wanted_state) {
					job_log(j, LOG_DEBUG, "KeepAlive: The following job is %s: %s", wanted_state ? "active" : "inactive", si->what);
					return true;
				}
			}
			break;
		}
	}

	return false;
}
6235
6236
const char *
6237
job_active(job_t j)
6238
{
6239
if (j->p && j->shutdown_monitor) {
6240
return "Monitoring shutdown";
6241
}
6242
if (j->p) {
6243
return "PID is still valid";
6244
}
6245
6246
if (j->priv_port_has_senders) {
6247
return "Privileged Port still has outstanding senders";
6248
}
6249
6250
struct machservice *ms;
6251
SLIST_FOREACH(ms, &j->machservices, sle) {
6252
/* If we've simulated an exit, we mark the job as non-active, even
6253
* though doing so will leave it in an unsafe state. We do this so that
6254
* shutdown can proceed. See <rdar://problem/11126530>.
6255
*/
6256
if (!j->workaround9359725 && ms->recv && machservice_active(ms)) {
6257
job_log(j, LOG_INFO, "Mach service is still active: %s", ms->name);
6258
return "Mach service is still active";
6259
}
6260
}
6261
6262
return NULL;
6263
}
6264
6265
void
6266
machservice_watch(job_t j, struct machservice *ms)
6267
{
6268
if (ms->recv) {
6269
if (job_assumes_zero(j, runtime_add_mport(ms->port, NULL)) == KERN_INVALID_RIGHT) {
6270
ms->recv_race_hack = true;
6271
}
6272
}
6273
}
6274
6275
void
6276
machservice_ignore(job_t j, struct machservice *ms)
6277
{
6278
/* We only add ports whose receive rights we control into the port set, so
6279
* don't attempt to remove te service from the port set if we didn't put it
6280
* there in the first place. Otherwise, we could wind up trying to access a
6281
* bogus index (like MACH_PORT_DEAD) or zeroing a valid one out.
6282
*
6283
* <rdar://problem/10898014>
6284
*/
6285
if (ms->recv) {
6286
(void)job_assumes_zero(j, runtime_remove_mport(ms->port));
6287
}
6288
}
6289
6290
/* Destroy a service's Mach port and mint a fresh one under a new generation
 * number. The teardown/recreate ordering here is load-bearing: the old port
 * must leave the hash before its rights are destroyed, and the new port must
 * be fully created before it is re-hashed.
 */
void
machservice_resetport(job_t j, struct machservice *ms)
{
	// Unhook and destroy the old receive and send rights.
	LIST_REMOVE(ms, port_hash_sle);
	(void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
	(void)job_assumes_zero(j, launchd_mport_deallocate(ms->port));

	// Bump the generation so stale check-ins can be detected, then recreate.
	ms->gen_num++;
	(void)job_assumes_zero(j, launchd_mport_create_recv(&ms->port));
	(void)job_assumes_zero(j, launchd_mport_make_send(ms->port));
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);
}
6302
6303
/* Tag the service's port with an identifying kernel "context" value derived
 * from the owning job's program name, to aid debugging of port ownership.
 */
void
machservice_stamp_port(job_t j, struct machservice *ms)
{
	mach_port_context_t ctx = 0;
	// Prefer the explicit Program key; otherwise fall back to argv[0].
	char *where2get = j->prog ? j->prog : j->argv[0];

	// Strip any directory prefix so only the basename is stamped.
	char *prog = NULL;
	if ((prog = strrchr(where2get, '/'))) {
		prog++;
	} else {
		prog = where2get;
	}

	/* Pack the leading sizeof(ctx) bytes of the basename directly into the
	 * integer. strncpy() is intentional here: no NUL terminator is needed,
	 * and short names leave the remaining bytes zeroed (ctx starts at 0).
	 */
	(void)strncpy((char *)&ctx, prog, sizeof(ctx));
#if __LITTLE_ENDIAN__
#if __LP64__
	/* Byte-swap on little-endian so the name's characters land in a fixed
	 * order in the context value — presumably so tools display the bytes in
	 * string order; TODO confirm.
	 */
	ctx = OSSwapBigToHostInt64(ctx);
#else
	ctx = OSSwapBigToHostInt32(ctx);
#endif
#endif

	(void)job_assumes_zero(j, mach_port_set_context(mach_task_self(), ms->port, ctx));
}
6327
6328
/* Create and register a new Mach service named `name` for job `j`.
 *
 * If *serviceport is MACH_PORT_NULL, launchd creates the receive right and
 * returns a send right through *serviceport (ms->recv = true). Otherwise the
 * caller-supplied port is adopted as-is and the service starts out active.
 * `pid_local` marks the service as per-PID (not globally look-up-able).
 * Returns the new machservice, or NULL on failure (dead port, OOM, or Mach
 * port creation failure).
 */
struct machservice *
machservice_new(job_t j, const char *name, mach_port_t *serviceport, bool pid_local)
{
	/* Don't create new MachServices for dead ports. This is primarily for
	 * clients who use bootstrap_register2(). They can pass in a send right, but
	 * then that port can immediately go dead. Hilarity ensues.
	 *
	 * <rdar://problem/10898014>
	 */
	if (*serviceport == MACH_PORT_DEAD) {
		return NULL;
	}

	// The service name lives in a trailing flexible buffer on the struct.
	struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(name) + 1);
	if (!job_assumes(j, ms != NULL)) {
		return NULL;
	}

	strcpy((char *)ms->name, name);
	ms->job = j;
	ms->gen_num = 1;
	ms->per_pid = pid_local;

	if (likely(*serviceport == MACH_PORT_NULL)) {
		// We own the receive right; hand a send right back to the caller.
		if (job_assumes_zero(j, launchd_mport_create_recv(&ms->port)) != KERN_SUCCESS) {
			goto out_bad;
		}

		if (job_assumes_zero(j, launchd_mport_make_send(ms->port)) != KERN_SUCCESS) {
			goto out_bad2;
		}
		*serviceport = ms->port;
		ms->recv = true;
	} else {
		// Caller-supplied port (bootstrap_register2() style); treat as active.
		ms->port = *serviceport;
		ms->isActive = true;
	}

	SLIST_INSERT_HEAD(&j->machservices, ms, sle);

	jobmgr_t where2put = j->mgr;
	// XPC domains are separate from Mach bootstraps.
	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		if (launchd_flat_mach_namespace && !(j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
			where2put = root_jobmgr;
		}
	}

	/* Don't allow MachServices added by multiple-instance jobs to be looked up
	 * by others. We could just do this with a simple bit, but then we'd have to
	 * uniquify the names ourselves to avoid collisions. This is just easier.
	 */
	if (!j->dedicated_instance) {
		LIST_INSERT_HEAD(&where2put->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
	}
	LIST_INSERT_HEAD(&port_hash[HASH_PORT(ms->port)], ms, port_hash_sle);

	// Only ports we created get the debugging context stamp.
	if (ms->recv) {
		machservice_stamp_port(j, ms);
	}

	job_log(j, LOG_DEBUG, "Mach service added%s: %s", (j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) ? " to private namespace" : "", name);

	return ms;
out_bad2:
	(void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
out_bad:
	free(ms);
	return NULL;
}
6398
6399
struct machservice *
6400
machservice_new_alias(job_t j, struct machservice *orig)
6401
{
6402
struct machservice *ms = calloc(1, sizeof(struct machservice) + strlen(orig->name) + 1);
6403
if (job_assumes(j, ms != NULL)) {
6404
strcpy((char *)ms->name, orig->name);
6405
ms->alias = orig;
6406
ms->job = j;
6407
6408
LIST_INSERT_HEAD(&j->mgr->ms_hash[hash_ms(ms->name)], ms, name_hash_sle);
6409
SLIST_INSERT_HEAD(&j->machservices, ms, sle);
6410
jobmgr_log(j->mgr, LOG_DEBUG, "Service aliased into job manager: %s", orig->name);
6411
}
6412
6413
return ms;
6414
}
6415
6416
bootstrap_status_t
6417
machservice_status(struct machservice *ms)
6418
{
6419
ms = ms->alias ? ms->alias : ms;
6420
if (ms->isActive) {
6421
return BOOTSTRAP_STATUS_ACTIVE;
6422
} else if (ms->job->ondemand) {
6423
return BOOTSTRAP_STATUS_ON_DEMAND;
6424
} else {
6425
return BOOTSTRAP_STATUS_INACTIVE;
6426
}
6427
}
6428
6429
/* Install the appropriate Mach exception port on `target_task`, or — when
 * target_task is 0 and this is PID 1 — on the host. Chooses, in order: the
 * job's alternate exception handler service, launchd's internal handler, or
 * the global exception server (bailing out if none exists).
 */
void
job_setup_exception_port(job_t j, task_t target_task)
{
	struct machservice *ms;
	thread_state_flavor_t f = 0;
	mach_port_t exc_port = the_exception_server;

	if (unlikely(j->alt_exc_handler)) {
		// Job requested a specific Mach service as its exception handler.
		ms = jobmgr_lookup_service(j->mgr, j->alt_exc_handler, true, 0);
		if (likely(ms)) {
			exc_port = machservice_port(ms);
		} else {
			job_log(j, LOG_WARNING, "Falling back to default Mach exception handler. Could not find: %s", j->alt_exc_handler);
		}
	} else if (unlikely(j->internal_exc_handler)) {
		exc_port = runtime_get_kernel_port();
	} else if (unlikely(!exc_port)) {
		// No exception server at all; nothing to install.
		return;
	}

	// Thread-state flavor delivered with EXCEPTION_STATE_IDENTITY messages.
#if defined (__ppc__) || defined(__ppc64__)
	f = PPC_THREAD_STATE64;
#elif defined(__i386__) || defined(__x86_64__)
	f = x86_THREAD_STATE;
#elif defined(__arm__)
	f = ARM_THREAD_STATE;
#else
#error "unknown architecture"
#endif

	if (likely(target_task)) {
		kern_return_t kr = task_set_exception_ports(target_task, EXC_MASK_CRASH | EXC_MASK_GUARD | EXC_MASK_RESOURCE, exc_port, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f);
		if (kr) {
			// MACH_SEND_INVALID_DEST just means the task already died; don't treat as a bug.
			if (kr != MACH_SEND_INVALID_DEST) {
				(void)job_assumes_zero(j, kr);
			} else {
				job_log(j, LOG_WARNING, "Task died before exception port could be set.");
			}
		}
	} else if (pid1_magic && the_exception_server) {
		// PID 1 with no specific task: install on the host itself.
		mach_port_t mhp = mach_host_self();
		(void)job_assumes_zero(j, host_set_exception_ports(mhp, EXC_MASK_CRASH | EXC_MASK_GUARD | EXC_MASK_RESOURCE, the_exception_server, EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, f));
		(void)job_assumes_zero(j, launchd_mport_deallocate(mhp));
	}
}
6474
6475
void
6476
job_set_exception_port(job_t j, mach_port_t port)
6477
{
6478
if (unlikely(!the_exception_server)) {
6479
the_exception_server = port;
6480
job_setup_exception_port(j, 0);
6481
} else {
6482
job_log(j, LOG_WARNING, "The exception server is already claimed!");
6483
}
6484
}
6485
6486
void
6487
machservice_setup_options(launch_data_t obj, const char *key, void *context)
6488
{
6489
struct machservice *ms = context;
6490
mach_port_t mhp = mach_host_self();
6491
int which_port;
6492
bool b;
6493
6494
if (!job_assumes(ms->job, mhp != MACH_PORT_NULL)) {
6495
return;
6496
}
6497
6498
switch (launch_data_get_type(obj)) {
6499
case LAUNCH_DATA_INTEGER:
6500
which_port = (int)launch_data_get_integer(obj); // XXX we should bound check this...
6501
if (strcasecmp(key, LAUNCH_JOBKEY_MACH_TASKSPECIALPORT) == 0) {
6502
switch (which_port) {
6503
case TASK_KERNEL_PORT:
6504
case TASK_HOST_PORT:
6505
case TASK_NAME_PORT:
6506
case TASK_BOOTSTRAP_PORT:
6507
/* I find it a little odd that zero isn't reserved in the header.
6508
* Normally Mach is fairly good about this convention...
6509
*/
6510
case 0:
6511
job_log(ms->job, LOG_WARNING, "Tried to set a reserved task special port: %d", which_port);
6512
break;
6513
default:
6514
ms->special_port_num = which_port;
6515
SLIST_INSERT_HEAD(&special_ports, ms, special_port_sle);
6516
break;
6517
}
6518
} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HOSTSPECIALPORT) == 0 && pid1_magic) {
6519
if (which_port > HOST_MAX_SPECIAL_KERNEL_PORT) {
6520
(void)job_assumes_zero(ms->job, (errno = host_set_special_port(mhp, which_port, ms->port)));
6521
} else {
6522
job_log(ms->job, LOG_WARNING, "Tried to set a reserved host special port: %d", which_port);
6523
}
6524
}
6525
case LAUNCH_DATA_BOOL:
6526
b = launch_data_get_bool(obj);
6527
if (strcasecmp(key, LAUNCH_JOBKEY_MACH_ENTERKERNELDEBUGGERONCLOSE) == 0) {
6528
ms->debug_on_close = b;
6529
} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_RESETATCLOSE) == 0) {
6530
ms->reset = b;
6531
} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_HIDEUNTILCHECKIN) == 0) {
6532
ms->hide = b;
6533
} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_EXCEPTIONSERVER) == 0) {
6534
job_set_exception_port(ms->job, ms->port);
6535
} else if (strcasecmp(key, LAUNCH_JOBKEY_MACH_KUNCSERVER) == 0) {
6536
ms->kUNCServer = b;
6537
(void)job_assumes_zero(ms->job, host_set_UNDServer(mhp, ms->port));
6538
}
6539
break;
6540
case LAUNCH_DATA_STRING:
6541
if (strcasecmp(key, LAUNCH_JOBKEY_MACH_DRAINMESSAGESONCRASH) == 0) {
6542
const char *option = launch_data_get_string(obj);
6543
if (strcasecmp(option, "One") == 0) {
6544
ms->drain_one_on_crash = true;
6545
} else if (strcasecmp(option, "All") == 0) {
6546
ms->drain_all_on_crash = true;
6547
}
6548
}
6549
break;
6550
case LAUNCH_DATA_DICTIONARY:
6551
if (launch_data_dict_get_count(obj) == 0) {
6552
job_set_exception_port(ms->job, ms->port);
6553
}
6554
break;
6555
default:
6556
break;
6557
}
6558
6559
(void)job_assumes_zero(ms->job, launchd_mport_deallocate(mhp));
6560
}
6561
6562
/* launch_data_dict_iterate() callback over a job's MachServices dictionary:
 * create the service named `key` for job `context`, then apply any per-service
 * options if the value is itself a dictionary.
 */
void
machservice_setup(launch_data_t obj, const char *key, void *context)
{
	job_t j = context;
	struct machservice *ms;
	mach_port_t p = MACH_PORT_NULL;

	// Refuse to register a name some other job already owns.
	if (unlikely(ms = jobmgr_lookup_service(j->mgr, key, false, 0))) {
		job_log(j, LOG_WARNING, "Conflict with job: %s over Mach service: %s", ms->job->label, key);
		return;
	}

	// p == MACH_PORT_NULL: launchd creates and owns the receive right.
	if (!job_assumes(j, (ms = machservice_new(j, key, &p, false)) != NULL)) {
		return;
	}

	ms->isActive = false;
	// Declared in the plist up front, as opposed to registered dynamically.
	ms->upfront = true;

	if (launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY) {
		launch_data_dict_iterate(obj, machservice_setup_options, ms);
	}

	kern_return_t kr = mach_port_set_attributes(mach_task_self(), ms->port, MACH_PORT_TEMPOWNER, NULL, 0);
	(void)job_assumes_zero(j, kr);
}
6588
6589
/* Shutdown-time garbage collection for a job manager and (recursively) its
 * submanagers. Stops/removes jobs in phases: first normal jobs, then
 * dirty-at-shutdown jobs, and finally the shutdown monitor itself. Returns jm
 * if it still exists afterwards, or NULL if it removed itself.
 */
jobmgr_t
jobmgr_do_garbage_collection(jobmgr_t jm)
{
	// Depth-first: collect submanagers before deciding our own fate.
	jobmgr_t jmi = NULL, jmn = NULL;
	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_do_garbage_collection(jmi);
	}

	if (!jm->shutting_down) {
		return jm;
	}

	if (SLIST_EMPTY(&jm->submgrs)) {
		jobmgr_log(jm, LOG_DEBUG, "No submanagers left.");
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Still have submanagers.");
		SLIST_FOREACH(jmi, &jm->submgrs, sle) {
			jobmgr_log(jm, LOG_DEBUG, "Submanager: %s", jmi->name);
		}
	}

	// Count of jobs that are still active and must exit before we proceed.
	size_t actives = 0;
	job_t ji = NULL, jn = NULL;
	LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
		if (ji->anonymous) {
			continue;
		}

		// Let the shutdown monitor be up until the very end.
		if (ji->shutdown_monitor) {
			continue;
		}

		/* On our first pass through, open a transaction for all the jobs that
		 * need to be dirty at shutdown. We'll close these transactions once the
		 * jobs that do not need to be dirty at shutdown have all exited.
		 */
		if (ji->dirty_at_shutdown && !jm->shutdown_jobs_dirtied) {
			job_open_shutdown_transaction(ji);
		}

		const char *active = job_active(ji);
		if (!active) {
			job_remove(ji);
		} else {
			job_log(ji, LOG_DEBUG, "Job is active: %s", active);
			job_stop(ji);

			// Dirty-at-shutdown jobs don't gate this phase; they're handled below.
			if (!ji->dirty_at_shutdown) {
				actives++;
			}

			if (ji->clean_kill) {
				job_log(ji, LOG_DEBUG, "Job was killed cleanly.");
			} else {
				job_log(ji, LOG_DEBUG, "Job was sent SIGTERM%s.", ji->sent_sigkill ? " and SIGKILL" : "");
			}
		}
	}

	jm->shutdown_jobs_dirtied = true;
	if (actives == 0) {
		if (!jm->shutdown_jobs_cleaned) {
			/* Once all normal jobs have exited, we clean the dirty-at-shutdown
			 * jobs and make them into normal jobs so that the above loop will
			 * handle them appropriately.
			 */
			LIST_FOREACH(ji, &jm->jobs, sle) {
				if (ji->anonymous) {
					continue;
				}

				if (!job_active(ji)) {
					continue;
				}

				if (ji->shutdown_monitor) {
					continue;
				}

				job_close_shutdown_transaction(ji);
				actives++;
			}

			jm->shutdown_jobs_cleaned = true;
		}

		if (SLIST_EMPTY(&jm->submgrs) && actives == 0) {
			/* We may be in a situation where the shutdown monitor is all that's
			 * left, in which case we want to stop it. Like dirty-at-shutdown
			 * jobs, we turn it back into a normal job so that the main loop
			 * treats it appropriately.
			 *
			 * See:
			 * <rdar://problem/10756306>
			 * <rdar://problem/11034971>
			 * <rdar://problem/11549541>
			 */
			if (jm->monitor_shutdown && _launchd_shutdown_monitor) {
				/* The rest of shutdown has completed, so we can kill the shutdown
				 * monitor now like it was any other job.
				 */
				_launchd_shutdown_monitor->shutdown_monitor = false;

				job_log(_launchd_shutdown_monitor, LOG_NOTICE | LOG_CONSOLE, "Stopping shutdown monitor.");
				job_stop(_launchd_shutdown_monitor);
				_launchd_shutdown_monitor = NULL;
			} else {
				jobmgr_log(jm, LOG_DEBUG, "Removing.");
				jobmgr_remove(jm);
				return NULL;
			}
		}
	}

	return jm;
}
6706
6707
void
6708
jobmgr_kill_stray_children(jobmgr_t jm, pid_t *p, size_t np)
6709
{
6710
/* I maintain that stray processes should be at the mercy of launchd during
6711
* shutdown, but nevertheless, things like diskimages-helper can stick
6712
* around, and SIGKILLing them can result in data loss. So we send SIGTERM
6713
* to all the strays and don't wait for them to exit before moving on.
6714
*
6715
* See rdar://problem/6562592
6716
*/
6717
size_t i = 0;
6718
for (i = 0; i < np; i++) {
6719
if (p[i] != 0) {
6720
jobmgr_log(jm, LOG_DEBUG | LOG_CONSOLE, "Sending SIGTERM to PID %u and continuing...", p[i]);
6721
(void)jobmgr_assumes_zero_p(jm, kill2(p[i], SIGTERM));
6722
}
6723
}
6724
}
6725
6726
void
6727
jobmgr_log_stray_children(jobmgr_t jm, bool kill_strays)
6728
{
6729
size_t kp_skipped = 0, len = sizeof(pid_t) * get_kern_max_proc();
6730
pid_t *pids = NULL;
6731
int i = 0, kp_cnt = 0;
6732
6733
if (likely(jm->parentmgr || !pid1_magic)) {
6734
return;
6735
}
6736
6737
if (!jobmgr_assumes(jm, (pids = malloc(len)) != NULL)) {
6738
return;
6739
}
6740
6741
runtime_ktrace0(RTKT_LAUNCHD_FINDING_ALL_STRAYS);
6742
6743
if (jobmgr_assumes_zero_p(jm, (kp_cnt = proc_listallpids(pids, len))) == -1) {
6744
goto out;
6745
}
6746
6747
pid_t *ps = (pid_t *)calloc(sizeof(pid_t), kp_cnt);
6748
for (i = 0; i < kp_cnt; i++) {
6749
struct proc_bsdshortinfo proc;
6750
if (proc_pidinfo(pids[i], PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
6751
if (errno != ESRCH) {
6752
(void)jobmgr_assumes_zero(jm, errno);
6753
}
6754
6755
kp_skipped++;
6756
continue;
6757
}
6758
6759
pid_t p_i = pids[i];
6760
pid_t pp_i = proc.pbsi_ppid;
6761
pid_t pg_i = proc.pbsi_pgid;
6762
const char *z = (proc.pbsi_status == SZOMB) ? "zombie " : "";
6763
const char *n = proc.pbsi_comm;
6764
6765
if (unlikely(p_i == 0 || p_i == 1)) {
6766
kp_skipped++;
6767
continue;
6768
}
6769
6770
if (_launchd_shutdown_monitor && pp_i == _launchd_shutdown_monitor->p) {
6771
kp_skipped++;
6772
continue;
6773
}
6774
6775
// We might have some jobs hanging around that we've decided to shut down in spite of.
6776
job_t j = jobmgr_find_by_pid(jm, p_i, false);
6777
if (!j || (j && j->anonymous)) {
6778
jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Stray %s%s at shutdown: PID %u PPID %u PGID %u %s", z, j ? "anonymous job" : "process", p_i, pp_i, pg_i, n);
6779
6780
int status = 0;
6781
if (pp_i == getpid() && !jobmgr_assumes(jm, proc.pbsi_status != SZOMB)) {
6782
if (jobmgr_assumes_zero(jm, waitpid(p_i, &status, WNOHANG)) == 0) {
6783
jobmgr_log(jm, LOG_INFO | LOG_CONSOLE, "Unreaped zombie stray exited with status %i.", WEXITSTATUS(status));
6784
}
6785
kp_skipped++;
6786
} else {
6787
job_t leader = jobmgr_find_by_pid(jm, pg_i, false);
6788
/* See rdar://problem/6745714. Some jobs have child processes that back kernel state,
6789
* so we don't want to terminate them. Long-term, I'd really like to provide shutdown
6790
* hints to the kernel along the way, so that it could shutdown certain subsystems when
6791
* their userspace emissaries go away, before the call to reboot(2).
6792
*/
6793
if (leader && leader->ignore_pg_at_shutdown) {
6794
kp_skipped++;
6795
} else {
6796
ps[i] = p_i;
6797
}
6798
}
6799
} else {
6800
kp_skipped++;
6801
}
6802
}
6803
6804
if ((kp_cnt - kp_skipped > 0) && kill_strays) {
6805
jobmgr_kill_stray_children(jm, ps, kp_cnt - kp_skipped);
6806
}
6807
6808
free(ps);
6809
out:
6810
free(pids);
6811
}
6812
6813
// Accessor: the parent job manager, or NULL for the root manager.
jobmgr_t
jobmgr_parent(jobmgr_t jm)
{
	return jm->parentmgr;
}
6818
6819
/* Release a forked child that is blocked reading from the fork pipe: write
 * its PID down the pipe, then close our end and forget the descriptor.
 */
void
job_uncork_fork(job_t j)
{
	pid_t c = j->p;

	job_log(j, LOG_DEBUG, "Uncorking the fork().");
	/* this unblocks the child and avoids a race
	 * between the above fork() and the kevent_mod() */
	(void)job_assumes(j, write(j->fork_fd, &c, sizeof(c)) == sizeof(c));
	(void)job_assumes_zero_p(j, runtime_close(j->fork_fd));
	j->fork_fd = 0;
}
6831
6832
/* Create a new job manager (Mach bootstrap).
 *
 * jm: parent manager, or NULL for a root manager. requestorport: the port
 * whose death tears down a sub-bootstrap (required when jm != NULL).
 * transfer_port: pre-existing bootstrap port to adopt, or MACH_PORT_NULL to
 * create/check in for one. sflag: passed to the session bootstrapper. name:
 * session name (NULL means "still under construction"). skip_init: don't
 * spawn the launchctl session bootstrapper. asport: audit session port for
 * the new manager. Returns the new manager or NULL on failure.
 */
jobmgr_t
jobmgr_new(jobmgr_t jm, mach_port_t requestorport, mach_port_t transfer_port, bool sflag, const char *name, bool skip_init, mach_port_t asport)
{
	job_t bootstrapper = NULL;
	jobmgr_t jmr;

	// The kqueue callback must be the first member so kevent udata casts work.
	__OS_COMPILETIME_ASSERT__(offsetof(struct jobmgr_s, kqjobmgr_callback) == 0);

	if (unlikely(jm && requestorport == MACH_PORT_NULL)) {
		jobmgr_log(jm, LOG_ERR, "Mach sub-bootstrap create request requires a requester port");
		return NULL;
	}

	// name_init is a trailing flexible buffer; reserve NAME_MAX if unnamed.
	jmr = calloc(1, sizeof(struct jobmgr_s) + (name ? (strlen(name) + 1) : NAME_MAX + 1));

	if (!jobmgr_assumes(jm, jmr != NULL)) {
		return NULL;
	}

	if (jm == NULL) {
		root_jobmgr = jmr;
	}

	jmr->kqjobmgr_callback = jobmgr_callback;
	strcpy(jmr->name_init, name ? name : "Under construction");

	jmr->req_port = requestorport;

	if ((jmr->parentmgr = jm)) {
		SLIST_INSERT_HEAD(&jm->submgrs, jmr, sle);
	}

	// Sub-bootstraps die with their requestor port; ask for a dead-name notification.
	if (jm && jobmgr_assumes_zero(jmr, launchd_mport_notify_req(jmr->req_port, MACH_NOTIFY_DEAD_NAME)) != KERN_SUCCESS) {
		goto out_bad;
	}

	if (transfer_port != MACH_PORT_NULL) {
		(void)jobmgr_assumes(jmr, jm != NULL);
		jmr->jm_port = transfer_port;
	} else if (!jm && !pid1_magic) {
		// Per-user launchd: check in with the system launchd for our bootstrap port.
		char *trusted_fd = getenv(LAUNCHD_TRUSTED_FD_ENV);
		name_t service_buf;

		snprintf(service_buf, sizeof(service_buf), "com.apple.launchd.peruser.%u", getuid());

		if (jobmgr_assumes_zero(jmr, bootstrap_check_in(bootstrap_port, service_buf, &jmr->jm_port)) != 0) {
			goto out_bad;
		}

		if (trusted_fd) {
			int dfd, lfd = (int) strtol(trusted_fd, NULL, 10);

			// dup() probes whether the inherited descriptor is still valid before closing it.
			if ((dfd = dup(lfd)) >= 0) {
				(void)jobmgr_assumes_zero_p(jmr, runtime_close(dfd));
				(void)jobmgr_assumes_zero_p(jmr, runtime_close(lfd));
			}

			unsetenv(LAUNCHD_TRUSTED_FD_ENV);
		}

		// cut off the Libc cache, we don't want to deadlock against ourself
		inherited_bootstrap_port = bootstrap_port;
		bootstrap_port = MACH_PORT_NULL;
		os_assert_zero(launchd_mport_notify_req(inherited_bootstrap_port, MACH_NOTIFY_DEAD_NAME));

		// We set this explicitly as we start each child
		os_assert_zero(launchd_set_bport(MACH_PORT_NULL));
	} else if (jobmgr_assumes_zero(jmr, launchd_mport_create_recv(&jmr->jm_port)) != KERN_SUCCESS) {
		goto out_bad;
	}

	// Fall back to naming the manager after its port index.
	if (!name) {
		sprintf(jmr->name_init, "%u", MACH_PORT_INDEX(jmr->jm_port));
	}

	if (!jm) {
		// The root manager owns the global signal and filesystem kevents.
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGUSR2, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(SIGINFO, EVFILT_SIGNAL, EV_ADD, 0, 0, jmr));
		(void)jobmgr_assumes_zero_p(jmr, kevent_mod(0, EVFILT_FS, EV_ADD, VQ_MOUNT|VQ_UNMOUNT|VQ_UPDATE, 0, jmr));
	}

	if (name && !skip_init) {
		bootstrapper = jobmgr_init_session(jmr, name, sflag);
	}

	if (!bootstrapper || !bootstrapper->weird_bootstrap) {
		if (jobmgr_assumes_zero(jmr, runtime_add_mport(jmr->jm_port, job_server)) != KERN_SUCCESS) {
			goto out_bad;
		}
	}

	jobmgr_log(jmr, LOG_DEBUG, "Created job manager%s%s", jm ? " with parent: " : ".", jm ? jm->name : "");

	if (bootstrapper) {
		bootstrapper->asport = asport;

		jobmgr_log(jmr, LOG_DEBUG, "Bootstrapping new job manager with audit session %u", asport);
		(void)jobmgr_assumes(jmr, job_dispatch(bootstrapper, true) != NULL);
	} else {
		jmr->req_asport = asport;
	}

	if (asport != MACH_PORT_NULL) {
		(void)jobmgr_assumes_zero(jmr, launchd_mport_copy_send(asport));
	}

	if (jmr->parentmgr) {
		runtime_add_weak_ref();
	}

	return jmr;

out_bad:
	if (jmr) {
		jobmgr_remove(jmr);
		if (jm == NULL) {
			root_jobmgr = NULL;
		}
	}
	return NULL;
}
6955
6956
jobmgr_t
6957
jobmgr_new_xpc_singleton_domain(jobmgr_t jm, name_t name)
6958
{
6959
jobmgr_t new = NULL;
6960
6961
/* These job managers are basically singletons, so we use the root Mach
6962
* bootstrap port as their requestor ports so they'll never go away.
6963
*/
6964
mach_port_t req_port = root_jobmgr->jm_port;
6965
if (jobmgr_assumes_zero(jm, launchd_mport_make_send(req_port)) == KERN_SUCCESS) {
6966
new = jobmgr_new(root_jobmgr, req_port, MACH_PORT_NULL, false, name, true, MACH_PORT_NULL);
6967
if (new) {
6968
new->properties |= BOOTSTRAP_PROPERTY_XPC_SINGLETON;
6969
new->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
6970
new->xpc_singleton = true;
6971
}
6972
}
6973
6974
return new;
6975
}
6976
6977
/* Find — or lazily create — the per-user XPC domain for `uid`. On creation,
 * a per-user launchd is ensured for the UID so its bootstrap and audit
 * session ports can be wired into the new domain. Returns NULL on failure.
 */
jobmgr_t
jobmgr_find_xpc_per_user_domain(jobmgr_t jm, uid_t uid)
{
	// Fast path: reuse an existing domain for this UID.
	jobmgr_t jmi = NULL;
	LIST_FOREACH(jmi, &_s_xpc_user_domains, xpc_le) {
		if (jmi->req_euid == uid) {
			return jmi;
		}
	}

	name_t name;
	(void)snprintf(name, sizeof(name), "com.apple.xpc.domain.peruser.%u", uid);
	jmi = jobmgr_new_xpc_singleton_domain(jm, name);
	if (jobmgr_assumes(jm, jmi != NULL)) {
		/* We need to create a per-user launchd for this UID if there isn't one
		 * already so we can grab the bootstrap port.
		 */
		job_t puj = jobmgr_lookup_per_user_context_internal(NULL, uid, &jmi->req_bsport);
		if (jobmgr_assumes(jmi, puj != NULL)) {
			// Take our own references on the audit session and bootstrap ports.
			(void)jobmgr_assumes_zero(jmi, launchd_mport_copy_send(puj->asport));
			(void)jobmgr_assumes_zero(jmi, launchd_mport_copy_send(jmi->req_bsport));
			jmi->shortdesc = "per-user";
			jmi->req_asport = puj->asport;
			jmi->req_asid = puj->asid;
			jmi->req_euid = uid;
			jmi->req_egid = -1;

			LIST_INSERT_HEAD(&_s_xpc_user_domains, jmi, xpc_le);
		} else {
			// No per-user launchd: the half-built domain is useless; tear it down.
			jobmgr_remove(jmi);
		}
	}

	return jmi;
}
7012
7013
jobmgr_t
7014
jobmgr_find_xpc_per_session_domain(jobmgr_t jm, au_asid_t asid)
7015
{
7016
jobmgr_t jmi = NULL;
7017
LIST_FOREACH(jmi, &_s_xpc_session_domains, xpc_le) {
7018
if (jmi->req_asid == asid) {
7019
return jmi;
7020
}
7021
}
7022
7023
name_t name;
7024
(void)snprintf(name, sizeof(name), "com.apple.xpc.domain.persession.%i", asid);
7025
jmi = jobmgr_new_xpc_singleton_domain(jm, name);
7026
if (jobmgr_assumes(jm, jmi != NULL)) {
7027
(void)jobmgr_assumes_zero(jmi, launchd_mport_make_send(root_jobmgr->jm_port));
7028
jmi->shortdesc = "per-session";
7029
jmi->req_bsport = root_jobmgr->jm_port;
7030
(void)jobmgr_assumes_zero(jmi, audit_session_port(asid, &jmi->req_asport));
7031
jmi->req_asid = asid;
7032
jmi->req_euid = -1;
7033
jmi->req_egid = -1;
7034
7035
LIST_INSERT_HEAD(&_s_xpc_session_domains, jmi, xpc_le);
7036
} else {
7037
jobmgr_remove(jmi);
7038
}
7039
7040
return jmi;
7041
}
7042
7043
/* Create the launchctl "bootstrap" job that initializes a session of type
 * `session_type` in manager `jm` (e.g. loading the session's jobs). sflag is
 * forwarded as launchctl's -s flag. Returns the bootstrapper job or NULL.
 */
job_t
jobmgr_init_session(jobmgr_t jm, const char *session_type, bool sflag)
{
	const char *bootstrap_tool[] = { "/bin/launchctl", "bootstrap", "-S", session_type, sflag ? "-s" : NULL, NULL };
	char thelabel[1000];
	job_t bootstrapper;

	snprintf(thelabel, sizeof(thelabel), "com.apple.launchctl.%s", session_type);
	bootstrapper = job_new(jm, thelabel, NULL, bootstrap_tool);

	// Sub-bootstrap or per-user case: the "weird bootstrap" path.
	if (jobmgr_assumes(jm, bootstrapper != NULL) && (jm->parentmgr || !pid1_magic)) {
		bootstrapper->is_bootstrapper = true;
		char buf[100];

		// <rdar://problem/5042202> launchd-201: can't ssh in with AFP OD account (hangs)
		snprintf(buf, sizeof(buf), "0x%X:0:0", getuid());
		envitem_new(bootstrapper, "__CF_USER_TEXT_ENCODING", buf, false);
		bootstrapper->weird_bootstrap = true;
		(void)jobmgr_assumes(jm, job_setup_machport(bootstrapper));
	} else if (bootstrapper && strncmp(session_type, VPROCMGR_SESSION_SYSTEM, sizeof(VPROCMGR_SESSION_SYSTEM)) == 0) {
		// PID 1 bootstrapping the System session.
#if TARGET_OS_EMBEDDED
		bootstrapper->psproctype = POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE;
#endif
		bootstrapper->is_bootstrapper = true;
		if (jobmgr_assumes(jm, pid1_magic)) {
			// Have our system bootstrapper print out to the console.
			bootstrapper->stdoutpath = strdup(_PATH_CONSOLE);
			bootstrapper->stderrpath = strdup(_PATH_CONSOLE);

			// Watch for the console device being revoked out from under us.
			if (launchd_console) {
				(void)jobmgr_assumes_zero_p(jm, kevent_mod((uintptr_t)fileno(launchd_console), EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_REVOKE, 0, jm));
			}
		}
	}

	jm->session_initialized = true;
	return bootstrapper;
}
7081
7082
/* A receive right somewhere died: purge every reference to `port` from this
 * manager and (recursively) its submanagers — registered services, pending
 * attach waiters, even the manager itself if `port` was its requestor.
 * Returns jm, or the result of jobmgr_shutdown() when jm itself is affected.
 */
jobmgr_t
jobmgr_delete_anything_with_port(jobmgr_t jm, mach_port_t port)
{
	struct machservice *ms, *next_ms;
	jobmgr_t jmi, jmn;

	/* Mach ports, unlike Unix descriptors, are reference counted. In other
	 * words, when some program hands us a second or subsequent send right to a
	 * port we already have open, the Mach kernel gives us the same port number
	 * back and increments a reference count associated with the port. This
	 * forces us, when discovering that a receive right at the other end
	 * has been deleted, to wander all of our objects to see what weird places
	 * clients might have handed us the same send right to use.
	 */

	if (jm == root_jobmgr) {
		// Losing the inherited bootstrap port means our parent context is gone.
		if (port == inherited_bootstrap_port) {
			(void)jobmgr_assumes_zero(jm, launchd_mport_deallocate(port));
			inherited_bootstrap_port = MACH_PORT_NULL;

			return jobmgr_shutdown(jm);
		}

		// Drop every registered (non-receive-owned) service backed by this port.
		LIST_FOREACH_SAFE(ms, &port_hash[HASH_PORT(port)], port_hash_sle, next_ms) {
			if (ms->port == port && !ms->recv) {
				machservice_delete(ms->job, ms, true);
			}
		}
	}

	SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
		jobmgr_delete_anything_with_port(jmi, port);
	}

	// A sub-bootstrap dies with its requestor port.
	if (jm->req_port == port) {
		jobmgr_log(jm, LOG_DEBUG, "Request port died: %i", MACH_PORT_INDEX(port));
		return jobmgr_shutdown(jm);
	}

	// Cancel any attach waiter keyed on this port.
	struct waiting4attach *w4ai = NULL;
	struct waiting4attach *w4ait = NULL;
	LIST_FOREACH_SAFE(w4ai, &jm->attaches, le, w4ait) {
		if (port == w4ai->port) {
			waiting4attach_delete(jm, w4ai);
			break;
		}
	}

	return jm;
}
7132
7133
/* Look up the Mach service `name` in manager `jm`.
 *
 * target_pid != 0 restricts the search to per-PID services of that process
 * (searched across all bootstraps). Otherwise global services are searched,
 * honoring the flat-namespace setting, optionally walking up to parent
 * managers when check_parent is set. Returns NULL if not found.
 */
struct machservice *
jobmgr_lookup_service(jobmgr_t jm, const char *name, bool check_parent, pid_t target_pid)
{
	struct machservice *ms;
	job_t target_j;

	jobmgr_log(jm, LOG_DEBUG, "Looking up %sservice %s", target_pid ? "per-PID " : "", name);

	if (target_pid) {
		/* This is a hack to let FileSyncAgent look up per-PID Mach services from the Background
		 * bootstrap in other bootstraps.
		 */

		// Start in the given bootstrap.
		if (unlikely((target_j = jobmgr_find_by_pid(jm, target_pid, false)) == NULL)) {
			// If we fail, do a deep traversal.
			if (unlikely((target_j = jobmgr_find_by_pid_deep(root_jobmgr, target_pid, true)) == NULL)) {
				jobmgr_log(jm, LOG_DEBUG, "Didn't find PID %i", target_pid);
				return NULL;
			}
		}

		SLIST_FOREACH(ms, &target_j->machservices, sle) {
			if (ms->per_pid && strcmp(name, ms->name) == 0) {
				return ms;
			}
		}

		job_log(target_j, LOG_DEBUG, "Didn't find per-PID Mach service: %s", name);
		return NULL;
	}

	jobmgr_t where2look = jm;
	// XPC domains are separate from Mach bootstraps.
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		// In a flat namespace, non-subset lookups resolve against the root manager.
		if (launchd_flat_mach_namespace && !(jm->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET)) {
			where2look = root_jobmgr;
		}
	}

	LIST_FOREACH(ms, &where2look->ms_hash[hash_ms(name)], name_hash_sle) {
		if (!ms->per_pid && strcmp(name, ms->name) == 0) {
			return ms;
		}
	}

	if (jm->parentmgr == NULL || !check_parent) {
		return NULL;
	}

	// Not found here; recurse up the bootstrap hierarchy.
	return jobmgr_lookup_service(jm->parentmgr, name, true, 0);
}
7185
7186
mach_port_t
7187
machservice_port(struct machservice *ms)
7188
{
7189
return ms->port;
7190
}
7191
7192
job_t
7193
machservice_job(struct machservice *ms)
7194
{
7195
return ms->job;
7196
}
7197
7198
bool
7199
machservice_hidden(struct machservice *ms)
7200
{
7201
return ms->hide;
7202
}
7203
7204
bool
7205
machservice_active(struct machservice *ms)
7206
{
7207
return ms->isActive;
7208
}
7209
7210
const char *
7211
machservice_name(struct machservice *ms)
7212
{
7213
return ms->name;
7214
}
7215
7216
void
7217
machservice_drain_port(struct machservice *ms)
7218
{
7219
bool drain_one = ms->drain_one_on_crash;
7220
bool drain_all = ms->drain_all_on_crash;
7221
7222
if (!job_assumes(ms->job, (drain_one || drain_all) == true)) {
7223
return;
7224
}
7225
7226
job_log(ms->job, LOG_INFO, "Draining %s...", ms->name);
7227
7228
char req_buff[sizeof(union __RequestUnion__catch_mach_exc_subsystem) * 2];
7229
char rep_buff[sizeof(union __ReplyUnion__catch_mach_exc_subsystem)];
7230
mig_reply_error_t *req_hdr = (mig_reply_error_t *)&req_buff;
7231
mig_reply_error_t *rep_hdr = (mig_reply_error_t *)&rep_buff;
7232
7233
mach_msg_return_t mr = ~MACH_MSG_SUCCESS;
7234
7235
do {
7236
/* This should be a direct check on the Mach service to see if it's an exception-handling
7237
* port, and it will break things if ReportCrash or SafetyNet start advertising other
7238
* Mach services. But for now, it should be okay.
7239
*/
7240
if (ms->job->alt_exc_handler || ms->job->internal_exc_handler) {
7241
mr = launchd_exc_runtime_once(ms->port, sizeof(req_buff), sizeof(rep_buff), req_hdr, rep_hdr, 0);
7242
} else {
7243
mach_msg_options_t options = MACH_RCV_MSG |
7244
MACH_RCV_TIMEOUT ;
7245
7246
mr = mach_msg((mach_msg_header_t *)req_hdr, options, 0, sizeof(req_buff), ms->port, 0, MACH_PORT_NULL);
7247
switch (mr) {
7248
case MACH_MSG_SUCCESS:
7249
mach_msg_destroy((mach_msg_header_t *)req_hdr);
7250
break;
7251
case MACH_RCV_TIMED_OUT:
7252
break;
7253
case MACH_RCV_TOO_LARGE:
7254
launchd_syslog(LOG_WARNING, "Tried to receive message that was larger than %lu bytes", sizeof(req_buff));
7255
break;
7256
default:
7257
break;
7258
}
7259
}
7260
} while (drain_all && mr != MACH_RCV_TIMED_OUT);
7261
}
7262
7263
void
7264
machservice_delete(job_t j, struct machservice *ms, bool port_died)
7265
{
7266
if (ms->alias) {
7267
/* HACK: Egregious code duplication. But dealing with aliases is a
7268
* pretty simple affair since they can't and shouldn't have any complex
7269
* behaviors associated with them.
7270
*/
7271
LIST_REMOVE(ms, name_hash_sle);
7272
SLIST_REMOVE(&j->machservices, ms, machservice, sle);
7273
free(ms);
7274
return;
7275
}
7276
7277
if (unlikely(ms->debug_on_close)) {
7278
job_log(j, LOG_NOTICE, "About to enter kernel debugger because of Mach port: 0x%x", ms->port);
7279
(void)job_assumes_zero(j, host_reboot(mach_host_self(), HOST_REBOOT_DEBUGGER));
7280
}
7281
7282
if (ms->recv && job_assumes(j, !machservice_active(ms))) {
7283
job_log(j, LOG_DEBUG, "Closing receive right for %s", ms->name);
7284
(void)job_assumes_zero(j, launchd_mport_close_recv(ms->port));
7285
}
7286
7287
(void)job_assumes_zero(j, launchd_mport_deallocate(ms->port));
7288
7289
if (unlikely(ms->port == the_exception_server)) {
7290
the_exception_server = 0;
7291
}
7292
7293
job_log(j, LOG_DEBUG, "Mach service deleted%s: %s", port_died ? " (port died)" : "", ms->name);
7294
7295
if (ms->special_port_num) {
7296
SLIST_REMOVE(&special_ports, ms, machservice, special_port_sle);
7297
}
7298
SLIST_REMOVE(&j->machservices, ms, machservice, sle);
7299
7300
if (!(j->dedicated_instance || ms->event_channel)) {
7301
LIST_REMOVE(ms, name_hash_sle);
7302
}
7303
LIST_REMOVE(ms, port_hash_sle);
7304
7305
free(ms);
7306
}
7307
7308
void
7309
machservice_request_notifications(struct machservice *ms)
7310
{
7311
mach_msg_id_t which = MACH_NOTIFY_DEAD_NAME;
7312
7313
ms->isActive = true;
7314
7315
if (ms->recv) {
7316
which = MACH_NOTIFY_PORT_DESTROYED;
7317
job_checkin(ms->job);
7318
}
7319
7320
(void)job_assumes_zero(ms->job, launchd_mport_notify_req(ms->port, which));
7321
}
7322
7323
#define NELEM(x) (sizeof(x)/sizeof(x[0]))
#define END_OF(x) (&(x)[NELEM(x)])

/* Split a mach_init-style server command line into an argv vector.
 *
 * Supports double-quoted arguments and backslash escapes. Returns a single
 * heap allocation holding both the pointer array and the copied strings
 * (caller frees with one free()), or NULL if the string has no arguments or
 * allocation fails. At most 99 arguments and ~1000 bytes of argument text are
 * honored; excess input is silently truncated.
 *
 * Fixes vs. previous revision:
 *  - argv[nargs] = NULL could write one past argv[] when 100 args parsed;
 *    a slot is now reserved for the terminator.
 *  - the per-argument NUL could write one past args[]; the copy loop now
 *    leaves room for it.
 *  - isspace() is given an unsigned char, as ISO C requires.
 */
char **
mach_cmd2argv(const char *string)
{
	char *argv[100], args[1000];
	const char *cp;
	char *argp = args, term, **argv_ret, *co;
	unsigned int nargs = 0, i;

	for (cp = string; *cp;) {
		while (isspace((unsigned char)*cp)) {
			cp++;
		}
		// A leading '"' makes the closing quote the terminator; otherwise
		// whitespace ends the argument.
		term = (*cp == '"') ? *cp++ : '\0';
		if (nargs < NELEM(argv) - 1) {	// reserve one slot for the NULL terminator
			argv[nargs++] = argp;
		}
		// Leave one byte for this argument's NUL terminator.
		while (*cp && (term ? *cp != term : !isspace((unsigned char)*cp)) && argp < END_OF(args) - 1) {
			if (*cp == '\\') {
				cp++;	// backslash escapes the next character
			}
			*argp++ = *cp;
			if (*cp) {
				cp++;
			}
		}
		*argp++ = '\0';
	}
	argv[nargs] = NULL;

	if (nargs == 0) {
		return NULL;
	}

	// One allocation: pointer array first, then the strings back-to-back.
	argv_ret = malloc((nargs + 1) * sizeof(char *) + strlen(string) + 1);

	if (!argv_ret) {
		(void)os_assumes_zero(errno);
		return NULL;
	}

	co = (char *)argv_ret + (nargs + 1) * sizeof(char *);

	for (i = 0; i < nargs; i++) {
		strcpy(co, argv[i]);
		argv_ret[i] = co;
		co += strlen(argv[i]) + 1;
	}
	argv_ret[i] = NULL;

	return argv_ret;
}
7376
7377
void
7378
job_checkin(job_t j)
7379
{
7380
j->checkedin = true;
7381
}
7382
7383
// Whether the job has root-equivalent ("embedded god") privileges.
bool job_is_god(job_t j)
{
	return j->embedded_god;
}
7387
7388
bool
7389
job_ack_port_destruction(mach_port_t p)
7390
{
7391
struct machservice *ms;
7392
job_t j;
7393
7394
LIST_FOREACH(ms, &port_hash[HASH_PORT(p)], port_hash_sle) {
7395
if (ms->recv && (ms->port == p)) {
7396
break;
7397
}
7398
}
7399
7400
if (!ms) {
7401
launchd_syslog(LOG_WARNING, "Could not find MachService to match receive right: 0x%x", p);
7402
return false;
7403
}
7404
7405
j = ms->job;
7406
7407
jobmgr_log(root_jobmgr, LOG_DEBUG, "Receive right returned to us: %s", ms->name);
7408
7409
/* Without being the exception handler, NOTE_EXIT is our only way to tell if
7410
* the job crashed, and we can't rely on NOTE_EXIT always being processed
7411
* after all the job's receive rights have been returned.
7412
*
7413
* So when we get receive rights back, check to see if the job has been
7414
* reaped yet. If not, then we add this service to a list of services to be
7415
* drained on crash if it's requested that behavior. So, for a job with N
7416
* receive rights all requesting that they be drained on crash, we can
7417
* safely handle the following sequence of events.
7418
*
7419
* ReceiveRight0Returned
7420
* ReceiveRight1Returned
7421
* ReceiveRight2Returned
7422
* NOTE_EXIT (reap, get exit status)
7423
* ReceiveRight3Returned
7424
* .
7425
* .
7426
* .
7427
* ReceiveRight(N - 1)Returned
7428
*/
7429
if (ms->drain_one_on_crash || ms->drain_all_on_crash) {
7430
if (j->crashed && j->reaped) {
7431
job_log(j, LOG_DEBUG, "Job has crashed. Draining port...");
7432
machservice_drain_port(ms);
7433
} else if (!(j->crashed || j->reaped)) {
7434
job_log(j, LOG_DEBUG, "Job's exit status is still unknown. Deferring drain.");
7435
}
7436
}
7437
7438
ms->isActive = false;
7439
if (ms->delete_on_destruction) {
7440
machservice_delete(j, ms, false);
7441
} else if (ms->reset) {
7442
machservice_resetport(j, ms);
7443
}
7444
7445
kern_return_t kr = mach_port_set_attributes(mach_task_self(), ms->port, MACH_PORT_TEMPOWNER, NULL, 0);
7446
(void)job_assumes_zero(j, kr);
7447
machservice_stamp_port(j, ms);
7448
job_dispatch(j, false);
7449
7450
if (ms->recv_race_hack) {
7451
ms->recv_race_hack = false;
7452
machservice_watch(ms->job, ms);
7453
}
7454
7455
root_jobmgr = jobmgr_do_garbage_collection(root_jobmgr);
7456
7457
return true;
7458
}
7459
7460
void
7461
job_ack_no_senders(job_t j)
7462
{
7463
j->priv_port_has_senders = false;
7464
7465
(void)job_assumes_zero(j, launchd_mport_close_recv(j->j_port));
7466
j->j_port = 0;
7467
7468
job_log(j, LOG_DEBUG, "No more senders on privileged Mach bootstrap port");
7469
7470
job_dispatch(j, false);
7471
}
7472
7473
bool
7474
semaphoreitem_new(job_t j, semaphore_reason_t why, const char *what)
7475
{
7476
struct semaphoreitem *si;
7477
size_t alloc_sz = sizeof(struct semaphoreitem);
7478
7479
if (what) {
7480
alloc_sz += strlen(what) + 1;
7481
}
7482
7483
if (job_assumes(j, si = calloc(1, alloc_sz)) == NULL) {
7484
return false;
7485
}
7486
7487
si->why = why;
7488
7489
if (what) {
7490
strcpy(si->what_init, what);
7491
}
7492
7493
SLIST_INSERT_HEAD(&j->semaphores, si, sle);
7494
7495
if ((why == OTHER_JOB_ENABLED || why == OTHER_JOB_DISABLED) && !j->nosy) {
7496
job_log(j, LOG_DEBUG, "Job is interested in \"%s\".", what);
7497
SLIST_INSERT_HEAD(&s_curious_jobs, j, curious_jobs_sle);
7498
j->nosy = true;
7499
}
7500
7501
semaphoreitem_runtime_mod_ref(si, true);
7502
7503
return true;
7504
}
7505
7506
void
7507
semaphoreitem_runtime_mod_ref(struct semaphoreitem *si, bool add)
7508
{
7509
/*
7510
* External events need to be tracked.
7511
* Internal events do NOT need to be tracked.
7512
*/
7513
7514
switch (si->why) {
7515
case SUCCESSFUL_EXIT:
7516
case FAILED_EXIT:
7517
case OTHER_JOB_ENABLED:
7518
case OTHER_JOB_DISABLED:
7519
case OTHER_JOB_ACTIVE:
7520
case OTHER_JOB_INACTIVE:
7521
return;
7522
default:
7523
break;
7524
}
7525
7526
if (add) {
7527
runtime_add_weak_ref();
7528
} else {
7529
runtime_del_weak_ref();
7530
}
7531
}
7532
7533
void
7534
semaphoreitem_delete(job_t j, struct semaphoreitem *si)
7535
{
7536
semaphoreitem_runtime_mod_ref(si, false);
7537
7538
SLIST_REMOVE(&j->semaphores, si, semaphoreitem, sle);
7539
7540
// We'll need to rethink this if it ever becomes possible to dynamically add or remove semaphores.
7541
if ((si->why == OTHER_JOB_ENABLED || si->why == OTHER_JOB_DISABLED) && j->nosy) {
7542
j->nosy = false;
7543
SLIST_REMOVE(&s_curious_jobs, j, job_s, curious_jobs_sle);
7544
}
7545
7546
free(si);
7547
}
7548
7549
void
7550
semaphoreitem_setup_dict_iter(launch_data_t obj, const char *key, void *context)
7551
{
7552
struct semaphoreitem_dict_iter_context *sdic = context;
7553
semaphore_reason_t why;
7554
7555
why = launch_data_get_bool(obj) ? sdic->why_true : sdic->why_false;
7556
7557
semaphoreitem_new(sdic->j, why, key);
7558
}
7559
7560
void
7561
semaphoreitem_setup(launch_data_t obj, const char *key, void *context)
7562
{
7563
struct semaphoreitem_dict_iter_context sdic = { context, 0, 0 };
7564
job_t j = context;
7565
semaphore_reason_t why;
7566
7567
switch (launch_data_get_type(obj)) {
7568
case LAUNCH_DATA_BOOL:
7569
if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_NETWORKSTATE) == 0) {
7570
why = launch_data_get_bool(obj) ? NETWORK_UP : NETWORK_DOWN;
7571
semaphoreitem_new(j, why, NULL);
7572
} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_SUCCESSFULEXIT) == 0) {
7573
why = launch_data_get_bool(obj) ? SUCCESSFUL_EXIT : FAILED_EXIT;
7574
semaphoreitem_new(j, why, NULL);
7575
j->start_pending = true;
7576
} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_AFTERINITIALDEMAND) == 0) {
7577
j->needs_kickoff = launch_data_get_bool(obj);
7578
} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_CRASHED) == 0) {
7579
why = launch_data_get_bool(obj) ? CRASHED : DID_NOT_CRASH;
7580
semaphoreitem_new(j, why, NULL);
7581
j->start_pending = true;
7582
} else {
7583
job_log(j, LOG_ERR, "Unrecognized KeepAlive attribute: %s", key);
7584
}
7585
break;
7586
case LAUNCH_DATA_DICTIONARY:
7587
if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBACTIVE) == 0) {
7588
sdic.why_true = OTHER_JOB_ACTIVE;
7589
sdic.why_false = OTHER_JOB_INACTIVE;
7590
} else if (strcasecmp(key, LAUNCH_JOBKEY_KEEPALIVE_OTHERJOBENABLED) == 0) {
7591
sdic.why_true = OTHER_JOB_ENABLED;
7592
sdic.why_false = OTHER_JOB_DISABLED;
7593
} else {
7594
job_log(j, LOG_ERR, "Unrecognized KeepAlive attribute: %s", key);
7595
break;
7596
}
7597
7598
launch_data_dict_iterate(obj, semaphoreitem_setup_dict_iter, &sdic);
7599
break;
7600
default:
7601
job_log(j, LOG_ERR, "Unrecognized KeepAlive type: %u", launch_data_get_type(obj));
7602
break;
7603
}
7604
}
7605
7606
bool
7607
externalevent_new(job_t j, struct eventsystem *sys, const char *evname, xpc_object_t event, uint64_t flags)
7608
{
7609
if (j->event_monitor) {
7610
job_log(j, LOG_ERR, "The event monitor job cannot use LaunchEvents or XPC Events.");
7611
return false;
7612
}
7613
7614
struct externalevent *ee = (struct externalevent *)calloc(1, sizeof(struct externalevent) + strlen(evname) + 1);
7615
if (!ee) {
7616
return false;
7617
}
7618
7619
ee->event = xpc_retain(event);
7620
(void)strcpy(ee->name, evname);
7621
ee->job = j;
7622
ee->id = sys->curid;
7623
ee->sys = sys;
7624
ee->state = false;
7625
ee->wanted_state = true;
7626
sys->curid++;
7627
7628
if (flags & XPC_EVENT_FLAG_ENTITLEMENTS) {
7629
struct ldcred *ldc = runtime_get_caller_creds();
7630
if (ldc) {
7631
ee->entitlements = xpc_copy_entitlements_for_pid(ldc->pid);
7632
}
7633
}
7634
7635
if (sys == _launchd_support_system) {
7636
ee->internal = true;
7637
}
7638
7639
LIST_INSERT_HEAD(&j->events, ee, job_le);
7640
LIST_INSERT_HEAD(&sys->events, ee, sys_le);
7641
7642
job_log(j, LOG_DEBUG, "New event: %s/%s", sys->name, evname);
7643
7644
eventsystem_ping();
7645
return true;
7646
}
7647
7648
void
7649
externalevent_delete(struct externalevent *ee)
7650
{
7651
xpc_release(ee->event);
7652
if (ee->entitlements) {
7653
xpc_release(ee->entitlements);
7654
}
7655
LIST_REMOVE(ee, job_le);
7656
LIST_REMOVE(ee, sys_le);
7657
7658
free(ee);
7659
7660
eventsystem_ping();
7661
}
7662
7663
void
7664
externalevent_setup(launch_data_t obj, const char *key, void *context)
7665
{
7666
/* This method can ONLY be called on the job_import() path, as it assumes
7667
* the input is a launch_data_t.
7668
*/
7669
struct externalevent_iter_ctx *ctx = (struct externalevent_iter_ctx *)context;
7670
7671
xpc_object_t xobj = ld2xpc(obj);
7672
if (xobj) {
7673
job_log(ctx->j, LOG_DEBUG, "Importing stream/event: %s/%s", ctx->sys->name, key);
7674
externalevent_new(ctx->j, ctx->sys, key, xobj, 0);
7675
xpc_release(xobj);
7676
} else {
7677
job_log(ctx->j, LOG_ERR, "Could not import event for job: %s", key);
7678
}
7679
}
7680
7681
struct externalevent *
7682
externalevent_find(const char *sysname, uint64_t id)
7683
{
7684
struct externalevent *ei = NULL;
7685
7686
struct eventsystem *es = eventsystem_find(sysname);
7687
if (es != NULL) {
7688
LIST_FOREACH(ei, &es->events, sys_le) {
7689
if (ei->id == id) {
7690
break;
7691
}
7692
}
7693
} else {
7694
launchd_syslog(LOG_ERR, "Could not find event system: %s", sysname);
7695
}
7696
7697
return ei;
7698
}
7699
7700
struct eventsystem *
7701
eventsystem_new(const char *name)
7702
{
7703
struct eventsystem *es = (struct eventsystem *)calloc(1, sizeof(struct eventsystem) + strlen(name) + 1);
7704
if (es != NULL) {
7705
es->curid = 1;
7706
(void)strcpy(es->name, name);
7707
LIST_INSERT_HEAD(&_s_event_systems, es, global_le);
7708
} else {
7709
(void)os_assumes_zero(errno);
7710
}
7711
7712
return es;
7713
}
7714
7715
void
7716
eventsystem_delete(struct eventsystem *es)
7717
{
7718
struct externalevent *ei = NULL;
7719
while ((ei = LIST_FIRST(&es->events))) {
7720
externalevent_delete(ei);
7721
}
7722
7723
LIST_REMOVE(es, global_le);
7724
7725
free(es);
7726
}
7727
7728
void
7729
eventsystem_setup(launch_data_t obj, const char *key, void *context)
7730
{
7731
job_t j = (job_t)context;
7732
if (!job_assumes(j, launch_data_get_type(obj) == LAUNCH_DATA_DICTIONARY)) {
7733
return;
7734
}
7735
7736
struct eventsystem *sys = eventsystem_find(key);
7737
if (unlikely(sys == NULL)) {
7738
sys = eventsystem_new(key);
7739
job_log(j, LOG_DEBUG, "New event system: %s", key);
7740
}
7741
7742
if (job_assumes(j, sys != NULL)) {
7743
struct externalevent_iter_ctx ctx = {
7744
.j = j,
7745
.sys = sys,
7746
};
7747
7748
job_log(j, LOG_DEBUG, "Importing events for stream: %s", key);
7749
launch_data_dict_iterate(obj, externalevent_setup, &ctx);
7750
}
7751
}
7752
7753
struct eventsystem *
7754
eventsystem_find(const char *name)
7755
{
7756
struct eventsystem *esi = NULL;
7757
LIST_FOREACH(esi, &_s_event_systems, global_le) {
7758
if (strcmp(name, esi->name) == 0) {
7759
break;
7760
}
7761
}
7762
7763
return esi;
7764
}
7765
7766
void
7767
eventsystem_ping(void)
7768
{
7769
if (!_launchd_event_monitor) {
7770
return;
7771
}
7772
7773
if (!_launchd_event_monitor->p) {
7774
(void)job_dispatch(_launchd_event_monitor, true);
7775
} else {
7776
if (_launchd_event_monitor->event_monitor_ready2signal) {
7777
(void)job_assumes_zero_p(_launchd_event_monitor, kill(_launchd_event_monitor->p, SIGUSR1));
7778
}
7779
}
7780
}
7781
7782
void
7783
jobmgr_dispatch_all_semaphores(jobmgr_t jm)
7784
{
7785
jobmgr_t jmi, jmn;
7786
job_t ji, jn;
7787
7788
7789
SLIST_FOREACH_SAFE(jmi, &jm->submgrs, sle, jmn) {
7790
jobmgr_dispatch_all_semaphores(jmi);
7791
}
7792
7793
LIST_FOREACH_SAFE(ji, &jm->jobs, sle, jn) {
7794
if (!SLIST_EMPTY(&ji->semaphores)) {
7795
job_dispatch(ji, false);
7796
}
7797
}
7798
}
7799
7800
/* Compute the next wall-clock time matching a cron-style (month, day, hour,
 * minute) spec, where -1 in any field is a wildcard. The search starts at the
 * next whole minute and, on a miss, restarts each subsequent year at Jan 1
 * 00:00 until a match is found.
 */
time_t
cronemu(int mon, int mday, int hour, int min)
{
	time_t now = time(NULL);
	struct tm candidate = *localtime(&now);

	candidate.tm_isdst = -1;	// let mktime() determine DST
	candidate.tm_sec = 0;
	candidate.tm_min++;	// never fire during the current (partial) minute

	while (!cronemu_mon(&candidate, mon, mday, hour, min)) {
		candidate.tm_year++;
		candidate.tm_mon = 0;
		candidate.tm_mday = 1;
		candidate.tm_hour = 0;
		candidate.tm_min = 0;
		mktime(&candidate);	// normalize before retrying
	}

	return mktime(&candidate);
}
7824
7825
/* Compute the next wall-clock time falling on the given weekday (0-7, where
 * both 0 and 7 mean Sunday, as in cron) at the given hour/minute (-1 is a
 * wildcard). Scans forward one day at a time from the next whole minute.
 */
time_t
cronemu_wday(int wday, int hour, int min)
{
	time_t now = time(NULL);
	struct tm candidate = *localtime(&now);

	candidate.tm_isdst = -1;	// let mktime() determine DST
	candidate.tm_sec = 0;
	candidate.tm_min++;	// never fire during the current (partial) minute

	if (wday == 7) {
		wday = 0;	// cron convention: 7 is also Sunday
	}

	while (!(candidate.tm_wday == wday && cronemu_hour(&candidate, hour, min))) {
		candidate.tm_mday++;
		candidate.tm_hour = 0;
		candidate.tm_min = 0;
		mktime(&candidate);	// normalize and recompute tm_wday
	}

	return mktime(&candidate);
}
7851
7852
/* Advance *wtm to the next time matching (mon, mday, hour, min); -1 fields
 * are wildcards. Returns false when no match exists within the current year
 * (wildcard case) or the requested month has already passed.
 */
bool
cronemu_mon(struct tm *wtm, int mon, int mday, int hour, int min)
{
	if (mon == -1) {
		// Wildcard month: probe month by month until the rest of the spec
		// matches, giving up once mktime() rolls us into the next year.
		struct tm probe = *wtm;
		int before_norm;

		while (!cronemu_mday(&probe, mday, hour, min)) {
			probe.tm_mon++;
			probe.tm_mday = 1;
			probe.tm_hour = 0;
			probe.tm_min = 0;
			before_norm = probe.tm_mon;
			mktime(&probe);
			if (before_norm != probe.tm_mon) {
				return false;	// crossed a year boundary
			}
		}
		*wtm = probe;
		return true;
	}

	if (mon < wtm->tm_mon) {
		return false;	// that month has already passed this year
	}

	if (mon > wtm->tm_mon) {
		// Jump to the first instant of the requested month.
		wtm->tm_mon = mon;
		wtm->tm_mday = 1;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_mday(wtm, mday, hour, min);
}
7887
7888
/* Advance *wtm to the next time matching (mday, hour, min) within the current
 * month; -1 fields are wildcards. Returns false when no match exists within
 * the month or the requested day has already passed.
 */
bool
cronemu_mday(struct tm *wtm, int mday, int hour, int min)
{
	if (mday == -1) {
		// Wildcard day: probe day by day, giving up once mktime() rolls us
		// into the next month.
		struct tm probe = *wtm;
		int before_norm;

		while (!cronemu_hour(&probe, hour, min)) {
			probe.tm_mday++;
			probe.tm_hour = 0;
			probe.tm_min = 0;
			before_norm = probe.tm_mday;
			mktime(&probe);
			if (before_norm != probe.tm_mday) {
				return false;	// crossed a month boundary
			}
		}
		*wtm = probe;
		return true;
	}

	if (mday < wtm->tm_mday) {
		return false;	// that day has already passed this month
	}

	if (mday > wtm->tm_mday) {
		// Jump to the first instant of the requested day.
		wtm->tm_mday = mday;
		wtm->tm_hour = 0;
		wtm->tm_min = 0;
	}

	return cronemu_hour(wtm, hour, min);
}
7921
7922
/* Advance *wtm to the next time matching (hour, min) within the current day;
 * -1 fields are wildcards. Returns false when no match exists within the day
 * or the requested hour has already passed.
 */
bool
cronemu_hour(struct tm *wtm, int hour, int min)
{
	if (hour == -1) {
		// Wildcard hour: probe hour by hour, giving up once mktime() rolls
		// us into the next day.
		struct tm probe = *wtm;
		int before_norm;

		while (!cronemu_min(&probe, min)) {
			probe.tm_hour++;
			probe.tm_min = 0;
			before_norm = probe.tm_hour;
			mktime(&probe);
			if (before_norm != probe.tm_hour) {
				return false;	// crossed a day boundary
			}
		}
		*wtm = probe;
		return true;
	}

	if (hour < wtm->tm_hour) {
		return false;	// that hour has already passed today
	}

	if (hour > wtm->tm_hour) {
		// Jump to the first minute of the requested hour.
		wtm->tm_hour = hour;
		wtm->tm_min = 0;
	}

	return cronemu_min(wtm, min);
}
7953
7954
/* Advance *wtm to the requested minute within the current hour.
 * min == -1 is the cron wildcard (any minute matches unchanged). Returns
 * false when the requested minute has already passed this hour.
 */
bool
cronemu_min(struct tm *wtm, int min)
{
	if (min == -1) {
		return true;	// wildcard: current minute is fine
	}

	if (min < wtm->tm_min) {
		return false;	// already past it this hour
	}

	if (min > wtm->tm_min) {
		wtm->tm_min = min;	// jump forward within the hour
	}

	return true;
}
7971
7972
kern_return_t
7973
job_mig_create_server(job_t j, cmd_t server_cmd, uid_t server_uid, boolean_t on_demand, mach_port_t *server_portp)
7974
{
7975
struct ldcred *ldc = runtime_get_caller_creds();
7976
job_t js;
7977
7978
if (!j) {
7979
return BOOTSTRAP_NO_MEMORY;
7980
}
7981
7982
if (unlikely(j->deny_job_creation)) {
7983
return BOOTSTRAP_NOT_PRIVILEGED;
7984
}
7985
7986
#if HAVE_SANDBOX
7987
const char **argv = (const char **)mach_cmd2argv(server_cmd);
7988
if (unlikely(argv == NULL)) {
7989
return BOOTSTRAP_NO_MEMORY;
7990
}
7991
if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_PATH, argv[0]) > 0)) {
7992
free(argv);
7993
return BOOTSTRAP_NOT_PRIVILEGED;
7994
}
7995
free(argv);
7996
#endif
7997
7998
job_log(j, LOG_DEBUG, "Server create attempt: %s", server_cmd);
7999
8000
if (pid1_magic) {
8001
if (ldc->euid || ldc->uid) {
8002
job_log(j, LOG_WARNING, "Server create attempt moved to per-user launchd: %s", server_cmd);
8003
return VPROC_ERR_TRY_PER_USER;
8004
}
8005
} else {
8006
if (unlikely(server_uid != getuid())) {
8007
job_log(j, LOG_WARNING, "Server create: \"%s\": As UID %d, we will not be able to switch to UID %d",
8008
server_cmd, getuid(), server_uid);
8009
}
8010
server_uid = 0; // zero means "do nothing"
8011
}
8012
8013
js = job_new_via_mach_init(j, server_cmd, server_uid, on_demand);
8014
8015
if (unlikely(js == NULL)) {
8016
return BOOTSTRAP_NO_MEMORY;
8017
}
8018
8019
*server_portp = js->j_port;
8020
return BOOTSTRAP_SUCCESS;
8021
}
8022
8023
kern_return_t
8024
job_mig_send_signal(job_t j, mach_port_t srp, name_t targetlabel, int sig)
8025
{
8026
struct ldcred *ldc = runtime_get_caller_creds();
8027
job_t otherj;
8028
8029
if (!j) {
8030
return BOOTSTRAP_NO_MEMORY;
8031
}
8032
8033
if (unlikely(ldc->euid != 0 && ldc->euid != getuid()) || j->deny_job_creation) {
8034
#if TARGET_OS_EMBEDDED
8035
if (!j->embedded_god) {
8036
return BOOTSTRAP_NOT_PRIVILEGED;
8037
}
8038
#else
8039
return BOOTSTRAP_NOT_PRIVILEGED;
8040
#endif
8041
}
8042
8043
#if HAVE_SANDBOX
8044
if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8045
return BOOTSTRAP_NOT_PRIVILEGED;
8046
}
8047
#endif
8048
8049
if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
8050
return BOOTSTRAP_UNKNOWN_SERVICE;
8051
}
8052
8053
#if TARGET_OS_EMBEDDED
8054
if (j->embedded_god) {
8055
if (j->username && otherj->username) {
8056
if (strcmp(j->username, otherj->username) != 0) {
8057
return BOOTSTRAP_NOT_PRIVILEGED;
8058
}
8059
} else {
8060
return BOOTSTRAP_NOT_PRIVILEGED;
8061
}
8062
}
8063
#endif
8064
8065
if (sig == VPROC_MAGIC_UNLOAD_SIGNAL) {
8066
bool do_block = otherj->p;
8067
8068
if (otherj->anonymous) {
8069
return BOOTSTRAP_NOT_PRIVILEGED;
8070
}
8071
8072
job_remove(otherj);
8073
8074
if (do_block) {
8075
job_log(j, LOG_DEBUG, "Blocking MIG return of job_remove(): %s", otherj->label);
8076
// this is messy. We shouldn't access 'otherj' after job_remove(), but we check otherj->p first...
8077
(void)job_assumes(otherj, waiting4removal_new(otherj, srp));
8078
return MIG_NO_REPLY;
8079
} else {
8080
return 0;
8081
}
8082
} else if (otherj->p) {
8083
(void)job_assumes_zero_p(j, kill2(otherj->p, sig));
8084
}
8085
8086
return 0;
8087
}
8088
8089
kern_return_t
8090
job_mig_log_forward(job_t j, vm_offset_t inval, mach_msg_type_number_t invalCnt)
8091
{
8092
struct ldcred *ldc = runtime_get_caller_creds();
8093
8094
if (!j) {
8095
return BOOTSTRAP_NO_MEMORY;
8096
}
8097
8098
if (!job_assumes(j, j->per_user)) {
8099
return BOOTSTRAP_NOT_PRIVILEGED;
8100
}
8101
8102
return launchd_log_forward(ldc->euid, ldc->egid, inval, invalCnt);
8103
}
8104
8105
kern_return_t
8106
job_mig_log_drain(job_t j, mach_port_t srp, vm_offset_t *outval, mach_msg_type_number_t *outvalCnt)
8107
{
8108
struct ldcred *ldc = runtime_get_caller_creds();
8109
8110
if (!j) {
8111
return BOOTSTRAP_NO_MEMORY;
8112
}
8113
8114
if (unlikely(ldc->euid)) {
8115
return BOOTSTRAP_NOT_PRIVILEGED;
8116
}
8117
8118
return launchd_log_drain(srp, outval, outvalCnt);
8119
}
8120
8121
kern_return_t
8122
job_mig_swap_complex(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey,
8123
vm_offset_t inval, mach_msg_type_number_t invalCnt, vm_offset_t *outval,
8124
mach_msg_type_number_t *outvalCnt)
8125
{
8126
const char *action;
8127
launch_data_t input_obj = NULL, output_obj = NULL;
8128
size_t data_offset = 0;
8129
size_t packed_size;
8130
struct ldcred *ldc = runtime_get_caller_creds();
8131
8132
if (!j) {
8133
return BOOTSTRAP_NO_MEMORY;
8134
}
8135
8136
if (inkey && ldc->pid != j->p) {
8137
if (ldc->euid && ldc->euid != getuid()) {
8138
return BOOTSTRAP_NOT_PRIVILEGED;
8139
}
8140
}
8141
8142
if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
8143
return 1;
8144
}
8145
8146
if (inkey && outkey) {
8147
action = "Swapping";
8148
} else if (inkey) {
8149
action = "Setting";
8150
} else {
8151
action = "Getting";
8152
}
8153
8154
job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
8155
8156
*outvalCnt = 20 * 1024 * 1024;
8157
mig_allocate(outval, *outvalCnt);
8158
if (!job_assumes(j, *outval != 0)) {
8159
return 1;
8160
}
8161
8162
/* Note to future maintainers: launch_data_unpack() does NOT return a heap
8163
* object. The data is decoded in-place. So do not call launch_data_free()
8164
* on input_obj.
8165
*/
8166
runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
8167
if (unlikely(invalCnt && !job_assumes(j, (input_obj = launch_data_unpack((void *)inval, invalCnt, NULL, 0, &data_offset, NULL)) != NULL))) {
8168
goto out_bad;
8169
}
8170
8171
char *store = NULL;
8172
switch (outkey) {
8173
case VPROC_GSK_ENVIRONMENT:
8174
if (!job_assumes(j, (output_obj = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
8175
goto out_bad;
8176
}
8177
jobmgr_export_env_from_other_jobs(j->mgr, output_obj);
8178
runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
8179
if (!job_assumes(j, launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL) != 0)) {
8180
goto out_bad;
8181
}
8182
launch_data_free(output_obj);
8183
break;
8184
case VPROC_GSK_ALLJOBS:
8185
if (!job_assumes(j, (output_obj = job_export_all()) != NULL)) {
8186
goto out_bad;
8187
}
8188
ipc_revoke_fds(output_obj);
8189
runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
8190
packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
8191
if (!job_assumes(j, packed_size != 0)) {
8192
goto out_bad;
8193
}
8194
launch_data_free(output_obj);
8195
break;
8196
case VPROC_GSK_MGR_NAME:
8197
if (!job_assumes(j, (output_obj = launch_data_new_string(j->mgr->name)) != NULL)) {
8198
goto out_bad;
8199
}
8200
packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
8201
if (!job_assumes(j, packed_size != 0)) {
8202
goto out_bad;
8203
}
8204
8205
launch_data_free(output_obj);
8206
break;
8207
case VPROC_GSK_JOB_OVERRIDES_DB:
8208
store = launchd_copy_persistent_store(LAUNCHD_PERSISTENT_STORE_DB, "overrides.plist");
8209
if (!store || !job_assumes(j, (output_obj = launch_data_new_string(store)) != NULL)) {
8210
free(store);
8211
goto out_bad;
8212
}
8213
8214
free(store);
8215
packed_size = launch_data_pack(output_obj, (void *)*outval, *outvalCnt, NULL, NULL);
8216
if (!job_assumes(j, packed_size != 0)) {
8217
goto out_bad;
8218
}
8219
8220
launch_data_free(output_obj);
8221
break;
8222
case VPROC_GSK_ZERO:
8223
mig_deallocate(*outval, *outvalCnt);
8224
*outval = 0;
8225
*outvalCnt = 0;
8226
break;
8227
default:
8228
goto out_bad;
8229
}
8230
8231
mig_deallocate(inval, invalCnt);
8232
return 0;
8233
8234
out_bad:
8235
mig_deallocate(inval, invalCnt);
8236
if (*outval) {
8237
mig_deallocate(*outval, *outvalCnt);
8238
}
8239
if (output_obj) {
8240
launch_data_free(output_obj);
8241
}
8242
8243
return 1;
8244
}
8245
8246
kern_return_t
8247
job_mig_swap_integer(job_t j, vproc_gsk_t inkey, vproc_gsk_t outkey, int64_t inval, int64_t *outval)
8248
{
8249
const char *action;
8250
kern_return_t kr = 0;
8251
struct ldcred *ldc = runtime_get_caller_creds();
8252
int oldmask;
8253
8254
if (!j) {
8255
return BOOTSTRAP_NO_MEMORY;
8256
}
8257
8258
if (inkey && ldc->pid != j->p) {
8259
if (ldc->euid && ldc->euid != getuid()) {
8260
return BOOTSTRAP_NOT_PRIVILEGED;
8261
}
8262
}
8263
8264
if (unlikely(inkey && outkey && !job_assumes(j, inkey == outkey))) {
8265
return 1;
8266
}
8267
8268
if (inkey && outkey) {
8269
action = "Swapping";
8270
} else if (inkey) {
8271
action = "Setting";
8272
} else {
8273
action = "Getting";
8274
}
8275
8276
job_log(j, LOG_DEBUG, "%s key: %u", action, inkey ? inkey : outkey);
8277
8278
switch (outkey) {
8279
case VPROC_GSK_ABANDON_PROCESS_GROUP:
8280
*outval = j->abandon_pg;
8281
break;
8282
case VPROC_GSK_LAST_EXIT_STATUS:
8283
*outval = j->last_exit_status;
8284
break;
8285
case VPROC_GSK_MGR_UID:
8286
*outval = getuid();
8287
break;
8288
case VPROC_GSK_MGR_PID:
8289
*outval = getpid();
8290
break;
8291
case VPROC_GSK_IS_MANAGED:
8292
*outval = j->anonymous ? 0 : 1;
8293
break;
8294
case VPROC_GSK_BASIC_KEEPALIVE:
8295
*outval = !j->ondemand;
8296
break;
8297
case VPROC_GSK_START_INTERVAL:
8298
*outval = j->start_interval;
8299
break;
8300
case VPROC_GSK_IDLE_TIMEOUT:
8301
*outval = j->timeout;
8302
break;
8303
case VPROC_GSK_EXIT_TIMEOUT:
8304
*outval = j->exit_timeout;
8305
break;
8306
case VPROC_GSK_GLOBAL_LOG_MASK:
8307
oldmask = runtime_setlogmask(LOG_UPTO(LOG_DEBUG));
8308
*outval = oldmask;
8309
runtime_setlogmask(oldmask);
8310
break;
8311
case VPROC_GSK_GLOBAL_UMASK:
8312
oldmask = umask(0);
8313
*outval = oldmask;
8314
umask(oldmask);
8315
break;
8316
case VPROC_GSK_TRANSACTIONS_ENABLED:
8317
job_log(j, LOG_DEBUG, "Reading EnableTransactions value.");
8318
*outval = j->enable_transactions;
8319
break;
8320
case VPROC_GSK_WAITFORDEBUGGER:
8321
*outval = j->wait4debugger;
8322
break;
8323
case VPROC_GSK_EMBEDDEDROOTEQUIVALENT:
8324
*outval = j->embedded_god;
8325
break;
8326
case VPROC_GSK_ZERO:
8327
*outval = 0;
8328
break;
8329
default:
8330
kr = 1;
8331
break;
8332
}
8333
8334
switch (inkey) {
8335
case VPROC_GSK_ABANDON_PROCESS_GROUP:
8336
j->abandon_pg = (bool)inval;
8337
break;
8338
case VPROC_GSK_GLOBAL_ON_DEMAND:
8339
job_log(j, LOG_DEBUG, "Job has set global on-demand mode to: %s", inval ? "true" : "false");
8340
kr = job_set_global_on_demand(j, inval);
8341
break;
8342
case VPROC_GSK_BASIC_KEEPALIVE:
8343
j->ondemand = !inval;
8344
break;
8345
case VPROC_GSK_START_INTERVAL:
8346
if (inval > UINT32_MAX || inval < 0) {
8347
kr = 1;
8348
} else if (inval) {
8349
if (j->start_interval == 0) {
8350
runtime_add_weak_ref();
8351
}
8352
j->start_interval = (typeof(j->start_interval)) inval;
8353
(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, j->start_interval, j));
8354
} else if (j->start_interval) {
8355
(void)job_assumes_zero_p(j, kevent_mod((uintptr_t)&j->start_interval, EVFILT_TIMER, EV_DELETE, 0, 0, NULL));
8356
if (j->start_interval != 0) {
8357
runtime_del_weak_ref();
8358
}
8359
j->start_interval = 0;
8360
}
8361
break;
8362
case VPROC_GSK_IDLE_TIMEOUT:
8363
if (inval < 0 || inval > UINT32_MAX) {
8364
kr = 1;
8365
} else {
8366
j->timeout = (typeof(j->timeout)) inval;
8367
}
8368
break;
8369
case VPROC_GSK_EXIT_TIMEOUT:
8370
if (inval < 0 || inval > UINT32_MAX) {
8371
kr = 1;
8372
} else {
8373
j->exit_timeout = (typeof(j->exit_timeout)) inval;
8374
}
8375
break;
8376
case VPROC_GSK_GLOBAL_LOG_MASK:
8377
if (inval < 0 || inval > UINT32_MAX) {
8378
kr = 1;
8379
} else {
8380
runtime_setlogmask((int) inval);
8381
}
8382
break;
8383
case VPROC_GSK_GLOBAL_UMASK:
8384
__OS_COMPILETIME_ASSERT__(sizeof (mode_t) == 2);
8385
if (inval < 0 || inval > UINT16_MAX) {
8386
kr = 1;
8387
} else {
8388
#if HAVE_SANDBOX
8389
if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8390
kr = 1;
8391
} else {
8392
umask((mode_t) inval);
8393
}
8394
#endif
8395
}
8396
break;
8397
case VPROC_GSK_TRANSACTIONS_ENABLED:
8398
/* No-op. */
8399
break;
8400
case VPROC_GSK_WEIRD_BOOTSTRAP:
8401
if (job_assumes(j, j->weird_bootstrap)) {
8402
job_log(j, LOG_DEBUG, "Unsetting weird bootstrap.");
8403
8404
mach_msg_size_t mxmsgsz = (typeof(mxmsgsz)) sizeof(union __RequestUnion__job_mig_job_subsystem);
8405
8406
if (job_mig_job_subsystem.maxsize > mxmsgsz) {
8407
mxmsgsz = job_mig_job_subsystem.maxsize;
8408
}
8409
8410
(void)job_assumes_zero(j, runtime_add_mport(j->mgr->jm_port, job_server));
8411
j->weird_bootstrap = false;
8412
}
8413
break;
8414
case VPROC_GSK_WAITFORDEBUGGER:
8415
j->wait4debugger_oneshot = inval;
8416
break;
8417
case VPROC_GSK_PERUSER_SUSPEND:
8418
if (job_assumes(j, pid1_magic && ldc->euid == 0)) {
8419
mach_port_t junk = MACH_PORT_NULL;
8420
job_t jpu = jobmgr_lookup_per_user_context_internal(j, (uid_t)inval, &junk);
8421
if (job_assumes(j, jpu != NULL)) {
8422
struct suspended_peruser *spi = NULL;
8423
LIST_FOREACH(spi, &j->suspended_perusers, sle) {
8424
if ((int64_t)(spi->j->mach_uid) == inval) {
8425
job_log(j, LOG_WARNING, "Job tried to suspend per-user launchd for UID %lli twice.", inval);
8426
break;
8427
}
8428
}
8429
8430
if (spi == NULL) {
8431
job_log(j, LOG_INFO, "Job is suspending the per-user launchd for UID %lli.", inval);
8432
spi = (struct suspended_peruser *)calloc(sizeof(struct suspended_peruser), 1);
8433
if (job_assumes(j, spi != NULL)) {
8434
/* Stop listening for events.
8435
*
8436
* See <rdar://problem/9014146>.
8437
*/
8438
if (jpu->peruser_suspend_count == 0) {
8439
job_ignore(jpu);
8440
}
8441
8442
spi->j = jpu;
8443
spi->j->peruser_suspend_count++;
8444
LIST_INSERT_HEAD(&j->suspended_perusers, spi, sle);
8445
job_stop(spi->j);
8446
*outval = jpu->p;
8447
} else {
8448
kr = BOOTSTRAP_NO_MEMORY;
8449
}
8450
}
8451
}
8452
} else {
8453
kr = 1;
8454
}
8455
break;
8456
case VPROC_GSK_PERUSER_RESUME:
8457
if (job_assumes(j, pid1_magic == true)) {
8458
struct suspended_peruser *spi = NULL, *spt = NULL;
8459
LIST_FOREACH_SAFE(spi, &j->suspended_perusers, sle, spt) {
8460
if ((int64_t)(spi->j->mach_uid) == inval) {
8461
spi->j->peruser_suspend_count--;
8462
LIST_REMOVE(spi, sle);
8463
job_log(j, LOG_INFO, "Job is resuming the per-user launchd for UID %lli.", inval);
8464
break;
8465
}
8466
}
8467
8468
if (!job_assumes(j, spi != NULL)) {
8469
job_log(j, LOG_WARNING, "Job tried to resume per-user launchd for UID %lli that it did not suspend.", inval);
8470
kr = BOOTSTRAP_NOT_PRIVILEGED;
8471
} else if (spi->j->peruser_suspend_count == 0) {
8472
job_watch(spi->j);
8473
job_dispatch(spi->j, false);
8474
free(spi);
8475
}
8476
} else {
8477
kr = 1;
8478
}
8479
break;
8480
case VPROC_GSK_ZERO:
8481
break;
8482
default:
8483
kr = 1;
8484
break;
8485
}
8486
8487
return kr;
8488
}
8489
8490
/* MIG server routine: called by a newly-forked child (via vproc) so launchd
 * can finish per-task setup — exception port, Mach special ports, and the
 * audit session port the child should adopt.
 *
 * j          - the job the child belongs to; NULL if the caller could not be
 *              resolved (treated as an allocation-style failure).
 * child_task - send right to the child's task; deallocated before returning.
 * asport     - out: audit session port for the child, or MACH_PORT_NULL.
 */
kern_return_t
job_mig_post_fork_ping(job_t j, task_t child_task, mach_port_t *asport)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(j, LOG_DEBUG, "Post fork ping.");

	struct machservice *ms;
	job_setup_exception_port(j, child_task);
	// Propagate every registered host special port into the child's task.
	SLIST_FOREACH(ms, &special_ports, special_port_sle) {
		if (j->per_user && (ms->special_port_num != TASK_ACCESS_PORT)) {
			// The TASK_ACCESS_PORT funny business is to workaround 5325399.
			continue;
		}

		errno = task_set_special_port(child_task, ms->special_port_num, ms->port);
		if (errno) {
			if (errno == MACH_SEND_INVALID_DEST) {
				// The child is already gone; no point trying the rest.
				job_log(j, LOG_WARNING, "Task died before special ports could be set.");
				break;
			}

			// Failures on anonymous jobs are expected noise, so log them
			// less loudly (see 5338127 below).
			int desired_log_level = LOG_ERR;
			if (j->anonymous) {
				// 5338127

				desired_log_level = LOG_WARNING;

				if (ms->special_port_num == TASK_SEATBELT_PORT) {
					desired_log_level = LOG_DEBUG;
				}
			}

			job_log(j, desired_log_level, "Could not setup Mach task special port %u: %s", ms->special_port_num, mach_error_string(errno));
		}
	}

	/* MIG will not zero-initialize this pointer, so we must always do so.
	 *
	 * <rdar://problem/8562593>.
	 */
	*asport = MACH_PORT_NULL;
#if !TARGET_OS_EMBEDDED
	if (!j->anonymous) {
		/* XPC services will spawn into the root security session by default.
		 * xpcproxy will switch them away if needed.
		 */
		if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
			job_log(j, LOG_DEBUG, "Returning session port: 0x%x", j->asport);
			*asport = j->asport;
		}
	}
#endif
	// We own the send right on child_task that MIG handed us; drop it.
	(void)job_assumes_zero(j, launchd_mport_deallocate(child_task));

	return 0;
}
8549
8550
/* MIG server routine: return send rights for all of a job's "upfront"
 * receive-side Mach services, as a MIG-allocated (out-of-line) port array.
 *
 * Returns BOOTSTRAP_UNKNOWN_SERVICE when the job has no qualifying services,
 * BOOTSTRAP_NO_MEMORY on allocation failure; on success the caller owns the
 * mig_allocate()d array in *sports with *sports_cnt entries.
 */
kern_return_t
job_mig_get_listener_port_rights(job_t j, mach_port_array_t *sports, mach_msg_type_number_t *sports_cnt)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// First pass: count the upfront receive-right services so we know how
	// much to allocate. job_assumes() logs (but still counts false) if an
	// upfront service unexpectedly lacks recv — so the count matches the
	// fill pass below, which tests msi->recv directly.
	size_t cnt = 0;
	struct machservice *msi = NULL;
	SLIST_FOREACH(msi, &j->machservices, sle) {
		if (msi->upfront && job_assumes(j, msi->recv)) {
			cnt++;
		}
	}

	if (cnt == 0) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	mach_port_array_t sports2 = NULL;
	mig_allocate((vm_address_t *)&sports2, cnt * sizeof(sports2[0]));
	if (!sports2) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// Second pass: fill the array with the service ports, in list order.
	size_t i = 0;
	SLIST_FOREACH(msi, &j->machservices, sle) {
		if (msi->upfront && msi->recv) {
			sports2[i] = msi->port;
			i++;
		}
	}

	*sports = sports2;
	*sports_cnt = cnt;

	return KERN_SUCCESS;
}
8588
8589
kern_return_t
8590
job_mig_register_gui_session(job_t j, mach_port_t asport)
8591
{
8592
if (!j->per_user) {
8593
return BOOTSTRAP_NOT_PRIVILEGED;
8594
}
8595
8596
jobmgr_t jm = jobmgr_find_xpc_per_user_domain(root_jobmgr, j->mach_uid);
8597
if (!jm) {
8598
return BOOTSTRAP_UNKNOWN_SERVICE;
8599
}
8600
8601
if (jm->req_gui_asport) {
8602
// This job manager persists, so we need to allow the per-user launchd
8603
// to update the GUI session as it comes and goes.
8604
jobmgr_assumes_zero(jm, launchd_mport_deallocate(jm->req_gui_asport));
8605
}
8606
8607
jm->req_gui_asport = asport;
8608
return KERN_SUCCESS;
8609
}
8610
8611
/* MIG server routine: initiate a system reboot/shutdown on behalf of a
 * client. Only honored when we are PID 1 and the caller is root (or, on
 * embedded, a job blessed as "embedded god"). Logs the caller's full
 * process ancestry for forensics before triggering launchd_shutdown().
 */
kern_return_t
job_mig_reboot2(job_t j, uint64_t flags)
{
	char who_started_the_reboot[2048] = "";
	struct proc_bsdshortinfo proc;
	struct ldcred *ldc = runtime_get_caller_creds();
	pid_t pid_to_log;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(!pid1_magic)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if !TARGET_OS_EMBEDDED
	if (unlikely(ldc->euid)) {
#else
	if (unlikely(ldc->euid) && !j->embedded_god) {
#endif
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// Walk up the caller's ancestry (pid -> ppid), appending each
	// "name[pid]" to the log string, until we reach PID 0.
	for (pid_to_log = ldc->pid; pid_to_log; pid_to_log = proc.pbsi_ppid) {
		size_t who_offset;
		if (proc_pidinfo(pid_to_log, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
			// ESRCH just means the process exited mid-walk; anything else
			// is unexpected and worth recording.
			if (errno != ESRCH) {
				(void)job_assumes_zero(j, errno);
			}
			return 1;
		}

		// Defend against a pid that claims to be its own parent, which
		// would otherwise loop forever.
		if (!job_assumes(j, pid_to_log != (pid_t)proc.pbsi_ppid)) {
			job_log(j, LOG_WARNING, "Job which is its own parent started reboot.");
			snprintf(who_started_the_reboot, sizeof(who_started_the_reboot), "%s[%u]->%s[%u]->%s[%u]->...", proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log, proc.pbsi_comm, pid_to_log);
			break;
		}

		who_offset = strlen(who_started_the_reboot);
		snprintf(who_started_the_reboot + who_offset, sizeof(who_started_the_reboot) - who_offset,
			" %s[%u]%s", proc.pbsi_comm, pid_to_log, proc.pbsi_ppid ? " ->" : "");
	}

	root_jobmgr->reboot_flags = (int)flags;
	job_log(j, LOG_DEBUG, "reboot2() initiated by:%s", who_started_the_reboot);
	launchd_shutdown();

	return 0;
}
8661
8662
kern_return_t
8663
job_mig_getsocket(job_t j, name_t spr)
8664
{
8665
if (!j) {
8666
return BOOTSTRAP_NO_MEMORY;
8667
}
8668
8669
if (j->deny_job_creation) {
8670
return BOOTSTRAP_NOT_PRIVILEGED;
8671
}
8672
8673
#if HAVE_SANDBOX
8674
struct ldcred *ldc = runtime_get_caller_creds();
8675
if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
8676
return BOOTSTRAP_NOT_PRIVILEGED;
8677
}
8678
#endif
8679
8680
ipc_server_init();
8681
8682
if (unlikely(!sockpath)) {
8683
return BOOTSTRAP_NO_MEMORY;
8684
}
8685
8686
strncpy(spr, sockpath, sizeof(name_t));
8687
8688
return BOOTSTRAP_SUCCESS;
8689
}
8690
8691
kern_return_t
8692
job_mig_log(job_t j, int pri, int err, logmsg_t msg)
8693
{
8694
if (!j) {
8695
return BOOTSTRAP_NO_MEMORY;
8696
}
8697
8698
if ((errno = err)) {
8699
job_log_error(j, pri, "%s", msg);
8700
} else {
8701
job_log(j, pri, "%s", msg);
8702
}
8703
8704
return 0;
8705
}
8706
8707
void
8708
job_setup_per_user_directory(job_t j, uid_t uid, const char *path)
8709
{
8710
struct stat sb;
8711
8712
bool created = false;
8713
int r = stat(path, &sb);
8714
if ((r == -1 && errno == ENOENT) || (r == 0 && !S_ISDIR(sb.st_mode))) {
8715
if (r == 0) {
8716
job_log(j, LOG_NOTICE, "File at location of per-user launchd directory is not a directory. Moving aside: %s", path);
8717
8718
char old[PATH_MAX];
8719
snprintf(old, sizeof(old), "%s.movedaside", path);
8720
(void)job_assumes_zero_p(j, rename(path, old));
8721
}
8722
8723
(void)job_assumes_zero_p(j, mkdir(path, S_IRWXU));
8724
(void)job_assumes_zero_p(j, chown(path, uid, 0));
8725
created = true;
8726
}
8727
8728
if (!created) {
8729
if (sb.st_uid != uid) {
8730
job_log(j, LOG_NOTICE, "Per-user launchd directory has improper user ownership. Repairing: %s", path);
8731
(void)job_assumes_zero_p(j, chown(path, uid, 0));
8732
}
8733
if (sb.st_gid != 0) {
8734
job_log(j, LOG_NOTICE, "Per-user launchd directory has improper group ownership. Repairing: %s", path);
8735
(void)job_assumes_zero_p(j, chown(path, uid, 0));
8736
}
8737
if (sb.st_mode != (S_IRWXU | S_IFDIR)) {
8738
job_log(j, LOG_NOTICE, "Per-user launchd directory has improper mode. Repairing: %s", path);
8739
(void)job_assumes_zero_p(j, chmod(path, S_IRWXU));
8740
}
8741
}
8742
}
8743
8744
void
8745
job_setup_per_user_directories(job_t j, uid_t uid, const char *label)
8746
{
8747
char path[PATH_MAX];
8748
8749
(void)snprintf(path, sizeof(path), LAUNCHD_DB_PREFIX "/%s", label);
8750
job_setup_per_user_directory(j, uid, path);
8751
8752
(void)snprintf(path, sizeof(path), LAUNCHD_LOG_PREFIX "/%s", label);
8753
job_setup_per_user_directory(j, uid, path);
8754
}
8755
8756
job_t
8757
jobmgr_lookup_per_user_context_internal(job_t j, uid_t which_user, mach_port_t *mp)
8758
{
8759
job_t ji = NULL;
8760
LIST_FOREACH(ji, &root_jobmgr->jobs, sle) {
8761
if (!ji->per_user) {
8762
continue;
8763
}
8764
if (ji->mach_uid != which_user) {
8765
continue;
8766
}
8767
if (SLIST_EMPTY(&ji->machservices)) {
8768
continue;
8769
}
8770
if (!SLIST_FIRST(&ji->machservices)->per_user_hack) {
8771
continue;
8772
}
8773
break;
8774
}
8775
8776
if (unlikely(ji == NULL)) {
8777
struct machservice *ms;
8778
char lbuf[1024];
8779
8780
job_log(j, LOG_DEBUG, "Creating per user launchd job for UID: %u", which_user);
8781
8782
sprintf(lbuf, "com.apple.launchd.peruser.%u", which_user);
8783
8784
ji = job_new(root_jobmgr, lbuf, "/sbin/launchd", NULL);
8785
8786
if (ji != NULL) {
8787
auditinfo_addr_t auinfo = {
8788
.ai_termid = {
8789
.at_type = AU_IPv4
8790
},
8791
.ai_auid = which_user,
8792
.ai_asid = AU_ASSIGN_ASID,
8793
};
8794
8795
if (setaudit_addr(&auinfo, sizeof(auinfo)) == 0) {
8796
job_log(ji, LOG_DEBUG, "Created new security session for per-user launchd: %u", auinfo.ai_asid);
8797
(void)job_assumes(ji, (ji->asport = audit_session_self()) != MACH_PORT_NULL);
8798
8799
/* Kinda lame that we have to do this, but we can't create an
8800
* audit session without joining it.
8801
*/
8802
(void)job_assumes(ji, audit_session_join(launchd_audit_port));
8803
ji->asid = auinfo.ai_asid;
8804
} else {
8805
job_log(ji, LOG_WARNING, "Could not set audit session!");
8806
job_remove(ji);
8807
return NULL;
8808
}
8809
8810
ji->mach_uid = which_user;
8811
ji->per_user = true;
8812
ji->enable_transactions = true;
8813
job_setup_per_user_directories(ji, which_user, lbuf);
8814
8815
if ((ms = machservice_new(ji, lbuf, mp, false)) == NULL) {
8816
job_remove(ji);
8817
ji = NULL;
8818
} else {
8819
ms->upfront = true;
8820
ms->per_user_hack = true;
8821
ms->hide = true;
8822
8823
ji = job_dispatch(ji, false);
8824
}
8825
}
8826
} else {
8827
*mp = machservice_port(SLIST_FIRST(&ji->machservices));
8828
job_log(j, LOG_DEBUG, "Per user launchd job found for UID: %u", which_user);
8829
}
8830
8831
return ji;
8832
}
8833
8834
/* MIG server routine: look up (or lazily create) the per-user launchd for a
 * UID and return its bootstrap port. Only supported in PID 1; non-root
 * callers are pinned to their own UID regardless of the one requested.
 */
kern_return_t
job_mig_lookup_per_user_context(job_t j, uid_t which_user, mach_port_t *up_cont)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jpu;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// During OS installation there are no per-user sessions to find.
	if (launchd_osinstaller) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

#if TARGET_OS_EMBEDDED
	// There is no need for per-user launchd's on embedded.
	job_log(j, LOG_ERR, "Per-user launchds are not supported on this platform.");
	return BOOTSTRAP_UNKNOWN_SERVICE;
#endif

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "mach-per-user-lookup", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	job_log(j, LOG_INFO, "Looking up per user launchd for UID: %u", which_user);

	if (unlikely(!pid1_magic)) {
		job_log(j, LOG_ERR, "Only PID 1 supports per user launchd lookups.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// Non-root callers may only look up their own per-user launchd: override
	// the requested UID with the caller's effective (or real) UID.
	if (ldc->euid || ldc->uid) {
		which_user = ldc->euid ?: ldc->uid;
	}

	// MIG does not zero-initialize out-parameters; default before the lookup.
	*up_cont = MACH_PORT_NULL;

	jpu = jobmgr_lookup_per_user_context_internal(j, which_user, up_cont);

	return 0;
}
8877
8878
/* MIG server routine implementing bootstrap_check_in(): hand the job the
 * receive right for one of its Mach services. Supports per-PID services,
 * strict check-in (service must already exist and belong to the caller),
 * dedicated-instance jobs (returns the instance UUID), and lazy creation of
 * previously unregistered services for legacy clients.
 */
kern_return_t
job_mig_check_in2(job_t j, name_t servicename, mach_port_t *serviceportp, uuid_t instance_id, uint64_t flags)
{
	bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;
	bool strict = flags & BOOTSTRAP_STRICT_CHECKIN;
	struct ldcred *ldc = runtime_get_caller_creds();
	struct machservice *ms = NULL;
	job_t jo;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (j->dedicated_instance) {
		// Dedicated instances only see their own services; also report the
		// instance UUID back to the client.
		struct machservice *msi = NULL;
		SLIST_FOREACH(msi, &j->machservices, sle) {
			if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
				uuid_copy(instance_id, j->instance_id);
				ms = msi;
				break;
			}
		}
	} else {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, per_pid_service ? ldc->pid : 0);
	}

	if (strict) {
		// Strict check-in: the service must exist, belong to this job, and
		// not already be checked in.
		if (likely(ms != NULL)) {
			if (ms->job != j) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			} else if (ms->isActive) {
				return BOOTSTRAP_SERVICE_ACTIVE;
			}
		} else {
			return BOOTSTRAP_UNKNOWN_SERVICE;
		}
	} else if (ms == NULL) {
		// Non-strict and unknown: create the service on the fly for legacy
		// clients (dedicated instances never get this path).
		if (job_assumes(j, !j->dedicated_instance)) {
			*serviceportp = MACH_PORT_NULL;

#if HAVE_SANDBOX
			if (unlikely(sandbox_check(ldc->pid, "mach-register", per_pid_service ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
				return BOOTSTRAP_NOT_PRIVILEGED;
			}
#endif
			if (unlikely((ms = machservice_new(j, servicename, serviceportp, per_pid_service)) == NULL)) {
				return BOOTSTRAP_NO_MEMORY;
			}

			// Treat this like a legacy job.
			if (!j->legacy_mach_job) {
				ms->isActive = true;
				ms->recv = false;
			}

			if (!(j->anonymous || j->legacy_LS_job || j->legacy_mach_job)) {
				job_log(j, LOG_APPLEONLY, "Please add the following service to the configuration file for this job: %s", servicename);
			}
		} else {
			return BOOTSTRAP_UNKNOWN_SERVICE;
		}
	} else {
		// Non-strict and found: reject check-in attempts by other jobs
		// (service hijacking) and double check-ins.
		if (unlikely((jo = machservice_job(ms)) != j)) {
			static pid_t last_warned_pid;

			if (last_warned_pid != ldc->pid) {
				job_log(jo, LOG_WARNING, "The following job tried to hijack the service \"%s\" from this job: %s", servicename, j->label);
				last_warned_pid = ldc->pid;
			}

			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (unlikely(machservice_active(ms))) {
			job_log(j, LOG_WARNING, "Check-in of Mach service failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
	}

	job_checkin(j);
	machservice_request_notifications(ms);

	job_log(j, LOG_INFO, "Check-in of service: %s", servicename);

	*serviceportp = machservice_port(ms);
	return BOOTSTRAP_SUCCESS;
}
8964
8965
/* MIG server routine implementing bootstrap_register(): (re)bind a service
 * name to a caller-supplied send right. Deletes any inactive prior
 * registration by the same job; a MACH_PORT_NULL serviceport just removes
 * the existing registration.
 */
kern_return_t
job_mig_register2(job_t j, name_t servicename, mach_port_t serviceport, uint64_t flags)
{
	struct machservice *ms;
	struct ldcred *ldc = runtime_get_caller_creds();
	bool per_pid_service = flags & BOOTSTRAP_PER_PID_SERVICE;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!per_pid_service && !j->legacy_LS_job) {
		job_log(j, LOG_APPLEONLY, "Performance: bootstrap_register() is deprecated. Service: %s", servicename);
	}

	job_log(j, LOG_DEBUG, "%sMach service registration attempt: %s", flags & BOOTSTRAP_PER_PID_SERVICE ? "Per PID " : "", servicename);

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "mach-register", per_pid_service ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	// 5641783 for the embedded hack
#if !TARGET_OS_EMBEDDED
	/*
	 * From a per-user/session launchd's perspective, SecurityAgent (UID
	 * 92) is a rogue application (not our UID, not root and not a child of
	 * us). We'll have to reconcile this design friction at a later date.
	 */
	if (unlikely(j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->uid != getuid() && ldc->uid != 92)) {
		if (pid1_magic) {
			return VPROC_ERR_TRY_PER_USER;
		} else {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
	}
#endif

	ms = jobmgr_lookup_service(j->mgr, servicename, false, flags & BOOTSTRAP_PER_PID_SERVICE ? ldc->pid : 0);

	if (unlikely(ms)) {
		// An existing registration may only be replaced by its owner, and
		// only while inactive; receive-side services must use check-in.
		if (machservice_job(ms) != j) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		if (machservice_active(ms)) {
			job_log(j, LOG_DEBUG, "Mach service registration failed. Already active: %s", servicename);
			return BOOTSTRAP_SERVICE_ACTIVE;
		}
		if (ms->recv && (serviceport != MACH_PORT_NULL)) {
			job_log(j, LOG_ERR, "bootstrap_register() erroneously called instead of bootstrap_check_in(). Mach service: %s", servicename);
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		job_checkin(j);
		machservice_delete(j, ms, false);
	}

	// A non-null port establishes the new registration; a null port leaves
	// the name unregistered (pure removal).
	if (likely(serviceport != MACH_PORT_NULL)) {
		if (likely(ms = machservice_new(j, servicename, &serviceport, flags & BOOTSTRAP_PER_PID_SERVICE ? true : false))) {
			machservice_request_notifications(ms);
		} else {
			return BOOTSTRAP_NO_MEMORY;
		}
	}


	return BOOTSTRAP_SUCCESS;
}
9033
9034
/* MIG server routine implementing bootstrap_look_up(): resolve a service
 * name to a send right. Handles per-PID services, XPC-domain-local lookups,
 * strict/privileged lookup semantics, multi-instance job materialization by
 * UUID, forwarding to an inherited (parent) bootstrap, and the loginwindow
 * per-user redirection hack.
 */
kern_return_t
job_mig_look_up2(job_t j, mach_port_t srp, name_t servicename, mach_port_t *serviceportp, pid_t target_pid, uuid_t instance_id, uint64_t flags)
{
	struct machservice *ms = NULL;
	struct ldcred *ldc = runtime_get_caller_creds();
	kern_return_t kr;
	bool per_pid_lookup = flags & BOOTSTRAP_PER_PID_SERVICE;
	bool specific_instance = flags & BOOTSTRAP_SPECIFIC_INSTANCE;
	bool strict_lookup = flags & BOOTSTRAP_STRICT_LOOKUP;
	bool privileged = flags & BOOTSTRAP_PRIVILEGED_SERVER;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	bool xpc_req = (j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN);

	// 5641783 for the embedded hack
#if !TARGET_OS_EMBEDDED
	// Non-root rogue clients of PID 1's root bootstrap are redirected to
	// their per-user launchd instead.
	if (unlikely(pid1_magic && j->anonymous && j->mgr->parentmgr == NULL && ldc->uid != 0 && ldc->euid != 0)) {
		return VPROC_ERR_TRY_PER_USER;
	}
#endif

#if HAVE_SANDBOX
	/* We don't do sandbox checking for XPC domains because, by definition, all
	 * the services within your domain should be accessible to you.
	 */
	if (!xpc_req && unlikely(sandbox_check(ldc->pid, "mach-lookup", per_pid_lookup ? SANDBOX_FILTER_LOCAL_NAME : SANDBOX_FILTER_GLOBAL_NAME, servicename) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	// Resolve the service. Per-PID lookups target target_pid's services;
	// XPC-domain requests never search parent managers; otherwise search
	// up the bootstrap hierarchy (third arg true), except that a strict +
	// privileged lookup only searches locally when we are the root.
	if (per_pid_lookup) {
		ms = jobmgr_lookup_service(j->mgr, servicename, false, target_pid);
	} else {
		if (xpc_req) {
			// Requests from XPC domains stay local.
			ms = jobmgr_lookup_service(j->mgr, servicename, false, 0);
		} else {
			/* A strict lookup which is privileged won't even bother trying to
			 * find a service if we're not hosting the root Mach bootstrap.
			 */
			if (strict_lookup && privileged) {
				if (inherited_bootstrap_port == MACH_PORT_NULL) {
					ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
				}
			} else {
				ms = jobmgr_lookup_service(j->mgr, servicename, true, 0);
			}
		}
	}

	if (likely(ms)) {
		// Follow an alias to the real service before examining it.
		ms = ms->alias ? ms->alias : ms;
		if (unlikely(specific_instance && ms->job->multiple_instances)) {
			// Multi-instance job: find (or create) the subjob whose
			// instance UUID matches, then look the service up on it.
			job_t ji = NULL;
			job_t instance = NULL;
			LIST_FOREACH(ji, &ms->job->subjobs, subjob_sle) {
				if (uuid_compare(instance_id, ji->instance_id) == 0) {
					instance = ji;
					break;
				}
			}

			if (unlikely(instance == NULL)) {
				job_log(ms->job, LOG_DEBUG, "Creating new instance of job based on lookup of service %s", ms->name);
				instance = job_new_subjob(ms->job, instance_id);
				if (job_assumes(j, instance != NULL)) {
					/* Disable this support for now. We only support having
					 * multi-instance jobs within private XPC domains.
					 */
#if 0
					/* If the job is multi-instance, in a singleton XPC domain
					 * and the request is not coming from within that singleton
					 * domain, we need to alias the new job into the requesting
					 * domain.
					 */
					if (!j->mgr->xpc_singleton && xpc_req) {
						(void)job_assumes(instance, job_new_alias(j->mgr, instance));
					}
#endif
					job_dispatch(instance, false);
				}
			}

			ms = NULL;
			if (job_assumes(j, instance != NULL)) {
				struct machservice *msi = NULL;
				SLIST_FOREACH(msi, &instance->machservices, sle) {
					/* sizeof(servicename) will return the size of a pointer,
					 * even though it's an array type, because when passing
					 * arrays as parameters in C, they implicitly degrade to
					 * pointers.
					 */
					if (strncmp(servicename, msi->name, sizeof(name_t) - 1) == 0) {
						ms = msi;
						break;
					}
				}
			}
		} else {
			// Hidden (and inactive) services and the per-user bootstrap
			// placeholder are invisible to ordinary lookups.
			if (machservice_hidden(ms) && !machservice_active(ms)) {
				ms = NULL;
			} else if (unlikely(ms->per_user_hack)) {
				ms = NULL;
			}
		}
	}

	if (likely(ms)) {
		(void)job_assumes(j, machservice_port(ms) != MACH_PORT_NULL);
		job_log(j, LOG_DEBUG, "%sMach service lookup: %s", per_pid_lookup ? "Per PID " : "", servicename);
		*serviceportp = machservice_port(ms);

		kr = BOOTSTRAP_SUCCESS;
	} else if (strict_lookup && !privileged) {
		/* Hack: We need to simulate XPC's desire not to establish a hierarchy.
		 * So if XPC is doing the lookup, and it's not a privileged lookup, we
		 * won't forward. But if it is a privileged lookup, then we must
		 * forward.
		 */
		return BOOTSTRAP_UNKNOWN_SERVICE;
	} else if (inherited_bootstrap_port != MACH_PORT_NULL) {
		// Requests from within an XPC domain don't get forwarded.
		job_log(j, LOG_DEBUG, "Mach service lookup forwarded: %s", servicename);
		/* Clients potentially check the audit token of the reply to verify that
		 * the returned send right is trustworthy.
		 */
		(void)job_assumes_zero(j, vproc_mig_look_up2_forward(inherited_bootstrap_port, srp, servicename, target_pid, instance_id, flags));
		return MIG_NO_REPLY;
	} else if (pid1_magic && j->anonymous && ldc->euid >= 500 && strcasecmp(j->mgr->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		/* 5240036 Should start background session when a lookup of CCacheServer
		 * occurs
		 *
		 * This is a total hack. We sniff out loginwindow session, and attempt
		 * to guess what it is up to. If we find a EUID that isn't root, we
		 * force it over to the per-user context.
		 */
		return VPROC_ERR_TRY_PER_USER;
	} else {
		job_log(j, LOG_DEBUG, "%sMach service lookup failed: %s", per_pid_lookup ? "Per PID " : "", servicename);
		kr = BOOTSTRAP_UNKNOWN_SERVICE;
	}

	return kr;
}
9181
9182
kern_return_t
9183
job_mig_parent(job_t j, mach_port_t srp, mach_port_t *parentport)
9184
{
9185
if (!j) {
9186
return BOOTSTRAP_NO_MEMORY;
9187
}
9188
9189
job_log(j, LOG_DEBUG, "Requested parent bootstrap port");
9190
jobmgr_t jm = j->mgr;
9191
9192
if (jobmgr_parent(jm)) {
9193
*parentport = jobmgr_parent(jm)->jm_port;
9194
} else if (MACH_PORT_NULL == inherited_bootstrap_port) {
9195
*parentport = jm->jm_port;
9196
} else {
9197
(void)job_assumes_zero(j, vproc_mig_parent_forward(inherited_bootstrap_port, srp));
9198
// The previous routine moved the reply port, we're forced to return MIG_NO_REPLY now
9199
return MIG_NO_REPLY;
9200
}
9201
return BOOTSTRAP_SUCCESS;
9202
}
9203
9204
kern_return_t
9205
job_mig_get_root_bootstrap(job_t j, mach_port_t *rootbsp)
9206
{
9207
if (!j) {
9208
return BOOTSTRAP_NO_MEMORY;
9209
}
9210
9211
if (inherited_bootstrap_port == MACH_PORT_NULL) {
9212
*rootbsp = root_jobmgr->jm_port;
9213
(void)job_assumes_zero(j, launchd_mport_make_send(root_jobmgr->jm_port));
9214
} else {
9215
*rootbsp = inherited_bootstrap_port;
9216
(void)job_assumes_zero(j, launchd_mport_copy_send(inherited_bootstrap_port));
9217
}
9218
9219
return BOOTSTRAP_SUCCESS;
9220
}
9221
9222
/* MIG server routine implementing bootstrap_info(): return parallel
 * MIG-allocated arrays of all non-per-PID service names, the labels of the
 * jobs owning them, and their active status, for the caller's (or, in a
 * flat namespace, the root) job manager.
 */
kern_return_t
job_mig_info(job_t j, name_array_t *servicenamesp,
		unsigned int *servicenames_cnt, name_array_t *servicejobsp,
		unsigned int *servicejobs_cnt, bootstrap_status_array_t *serviceactivesp,
		unsigned int *serviceactives_cnt, uint64_t flags)
{
	name_array_t service_names = NULL;
	name_array_t service_jobs = NULL;
	bootstrap_status_array_t service_actives = NULL;
	unsigned int cnt = 0, cnt2 = 0;
	jobmgr_t jm;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

#if TARGET_OS_EMBEDDED
	struct ldcred *ldc = runtime_get_caller_creds();
	if (ldc->euid) {
		return EPERM;
	}
#endif // TARGET_OS_EMBEDDED

	// In a flat namespace everyone sees the root manager's services, unless
	// the caller is in an explicit subset or asks to stay local.
	if (launchd_flat_mach_namespace) {
		if ((j->mgr->properties & BOOTSTRAP_PROPERTY_EXPLICITSUBSET) || (flags & BOOTSTRAP_FORCE_LOCAL)) {
			jm = j->mgr;
		} else {
			jm = root_jobmgr;
		}
	} else {
		jm = j->mgr;
	}

	// First pass: count non-per-PID services across the whole hash table.
	unsigned int i = 0;
	struct machservice *msi = NULL;
	for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
		LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
			cnt += !msi->per_pid ? 1 : 0;
		}
	}

	if (cnt == 0) {
		goto out;
	}

	mig_allocate((vm_address_t *)&service_names, cnt * sizeof(service_names[0]));
	if (!job_assumes(j, service_names != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_jobs, cnt * sizeof(service_jobs[0]));
	if (!job_assumes(j, service_jobs != NULL)) {
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&service_actives, cnt * sizeof(service_actives[0]));
	if (!job_assumes(j, service_actives != NULL)) {
		goto out_bad;
	}

	// Second pass: fill the three parallel arrays.
	for (i = 0; i < MACHSERVICE_HASH_SIZE; i++) {
		LIST_FOREACH(msi, &jm->ms_hash[i], name_hash_sle) {
			if (!msi->per_pid) {
				strlcpy(service_names[cnt2], machservice_name(msi), sizeof(service_names[0]));
				/* NOTE(review): this reassigns the LIST_FOREACH loop variable
				 * to the alias target, so iteration continues from the alias's
				 * hash chain — presumably intentional/benign here, but verify
				 * against how aliases are linked before touching this loop.
				 */
				msi = msi->alias ? msi->alias : msi;
				if (msi->job->mgr->shortdesc) {
					strlcpy(service_jobs[cnt2], msi->job->mgr->shortdesc, sizeof(service_jobs[0]));
				} else {
					strlcpy(service_jobs[cnt2], msi->job->label, sizeof(service_jobs[0]));
				}
				service_actives[cnt2] = machservice_status(msi);
				cnt2++;
			}
		}
	}

	(void)job_assumes(j, cnt == cnt2);

out:
	// cnt == 0 falls through here with all three arrays NULL.
	*servicenamesp = service_names;
	*servicejobsp = service_jobs;
	*serviceactivesp = service_actives;
	*servicenames_cnt = *servicejobs_cnt = *serviceactives_cnt = cnt;

	return BOOTSTRAP_SUCCESS;

out_bad:
	if (service_names) {
		mig_deallocate((vm_address_t)service_names, cnt * sizeof(service_names[0]));
	}
	if (service_jobs) {
		mig_deallocate((vm_address_t)service_jobs, cnt * sizeof(service_jobs[0]));
	}
	if (service_actives) {
		mig_deallocate((vm_address_t)service_actives, cnt * sizeof(service_actives[0]));
	}

	return BOOTSTRAP_NO_MEMORY;
}
9321
9322
/* MIG server routine: enumerate the caller's child bootstraps — submanagers
 * plus, in PID 1, the per-user launchds — returning parallel MIG-allocated
 * arrays of ports, names, and property bitmasks. Root-only.
 */
kern_return_t
job_mig_lookup_children(job_t j, mach_port_array_t *child_ports,
		mach_msg_type_number_t *child_ports_cnt, name_array_t *child_names,
		mach_msg_type_number_t *child_names_cnt,
		bootstrap_property_array_t *child_properties,
		mach_msg_type_number_t *child_properties_cnt)
{
	kern_return_t kr = BOOTSTRAP_NO_MEMORY;
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	struct ldcred *ldc = runtime_get_caller_creds();

	/* Only allow root processes to look up children, even if we're in the per-user launchd.
	 * Otherwise, this could be used to cross sessions, which counts as a security vulnerability
	 * in a non-flat namespace.
	 */
	if (ldc->euid != 0) {
		job_log(j, LOG_WARNING, "Attempt to look up children of bootstrap by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// First pass: count submanagers (and per-user launchds if PID 1).
	unsigned int cnt = 0;

	jobmgr_t jmr = j->mgr;
	jobmgr_t jmi = NULL;
	SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
		cnt++;
	}

	// Find our per-user launchds if we're PID 1.
	job_t ji = NULL;
	if (pid1_magic) {
		LIST_FOREACH(ji, &jmr->jobs, sle) {
			cnt += ji->per_user ? 1 : 0;
		}
	}

	if (cnt == 0) {
		return BOOTSTRAP_NO_CHILDREN;
	}

	mach_port_array_t _child_ports = NULL;
	name_array_t _child_names = NULL;
	bootstrap_property_array_t _child_properties = NULL;

	mig_allocate((vm_address_t *)&_child_ports, cnt * sizeof(_child_ports[0]));
	if (!job_assumes(j, _child_ports != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&_child_names, cnt * sizeof(_child_names[0]));
	if (!job_assumes(j, _child_names != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out_bad;
	}

	mig_allocate((vm_address_t *)&_child_properties, cnt * sizeof(_child_properties[0]));
	if (!job_assumes(j, _child_properties != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out_bad;
	}

	// Second pass: fill submanager entries, minting a send right per port
	// (MACH_PORT_NULL on failure so the client still gets an entry).
	unsigned int cnt2 = 0;
	SLIST_FOREACH(jmi, &jmr->submgrs, sle) {
		if (jobmgr_assumes_zero(jmi, launchd_mport_make_send(jmi->jm_port)) == KERN_SUCCESS) {
			_child_ports[cnt2] = jmi->jm_port;
		} else {
			_child_ports[cnt2] = MACH_PORT_NULL;
		}

		strlcpy(_child_names[cnt2], jmi->name, sizeof(_child_names[0]));
		_child_properties[cnt2] = jmi->properties;

		cnt2++;
	}

	// Then the per-user launchds: expose each one's bootstrap service port.
	if (pid1_magic) LIST_FOREACH(ji, &jmr->jobs, sle) {
		if (ji->per_user) {
			if (job_assumes(ji, SLIST_FIRST(&ji->machservices)->per_user_hack == true)) {
				mach_port_t port = machservice_port(SLIST_FIRST(&ji->machservices));

				if (job_assumes_zero(ji, launchd_mport_copy_send(port)) == KERN_SUCCESS) {
					_child_ports[cnt2] = port;
				} else {
					_child_ports[cnt2] = MACH_PORT_NULL;
				}
			} else {
				_child_ports[cnt2] = MACH_PORT_NULL;
			}

			strlcpy(_child_names[cnt2], ji->label, sizeof(_child_names[0]));
			// |= relies on mig_allocate() handing back zero-filled memory —
			// presumably true (vm_allocate semantics); verify if changed.
			_child_properties[cnt2] |= BOOTSTRAP_PROPERTY_PERUSER;

			cnt2++;
		}
	}

	*child_names_cnt = cnt;
	*child_ports_cnt = cnt;
	*child_properties_cnt = cnt;

	*child_names = _child_names;
	*child_ports = _child_ports;
	*child_properties = _child_properties;

	unsigned int i = 0;
	for (i = 0; i < cnt; i++) {
		job_log(j, LOG_DEBUG, "child_names[%u] = %s", i, (char *)_child_names[i]);
	}

	return BOOTSTRAP_SUCCESS;
out_bad:
	// Unwind any arrays we managed to allocate before the failure.
	if (_child_ports) {
		mig_deallocate((vm_address_t)_child_ports, cnt * sizeof(_child_ports[0]));
	}

	if (_child_names) {
		mig_deallocate((vm_address_t)_child_names, cnt * sizeof(_child_names[0]));
	}

	if (_child_properties) {
		mig_deallocate((vm_address_t)_child_properties, cnt * sizeof(_child_properties[0]));
	}

	return kr;
}
9451
9452
kern_return_t
9453
job_mig_pid_is_managed(job_t j __attribute__((unused)), pid_t p, boolean_t *managed)
9454
{
9455
struct ldcred *ldc = runtime_get_caller_creds();
9456
if ((ldc->euid != geteuid()) && (ldc->euid != 0)) {
9457
return BOOTSTRAP_NOT_PRIVILEGED;
9458
}
9459
9460
/* This is so loginwindow doesn't try to quit GUI apps that have been launched
9461
* directly by launchd as agents.
9462
*/
9463
job_t j_for_pid = jobmgr_find_by_pid_deep(root_jobmgr, p, false);
9464
if (j_for_pid && !j_for_pid->anonymous && !j_for_pid->legacy_LS_job) {
9465
*managed = true;
9466
}
9467
9468
return BOOTSTRAP_SUCCESS;
9469
}
9470
9471
/* MiG server routine: return a send right to the job port of the job whose
 * label is `label`.
 *
 * Privilege: the caller must be root or share our euid, must not have been
 * marked deny_job_creation, and (when sandboxing is built in) must pass the
 * "job-creation" sandbox check.
 *
 * NOTE(review): `j` carries __attribute__((unused)) but is in fact used
 * throughout — the attribute looks stale; confirm against the MiG interface.
 */
kern_return_t
job_mig_port_for_label(job_t j __attribute__((unused)), name_t label, mach_port_t *mp)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	struct ldcred *ldc = runtime_get_caller_creds();
	kern_return_t kr = BOOTSTRAP_NOT_PRIVILEGED;

#if HAVE_SANDBOX
	// Reject callers whose sandbox profile forbids job creation.
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	mach_port_t _mp = MACH_PORT_NULL;
	if (!j->deny_job_creation && (ldc->euid == 0 || ldc->euid == geteuid())) {
		job_t target_j = job_find(NULL, label);
		if (jobmgr_assumes(root_jobmgr, target_j != NULL)) {
			// Lazily create the target's job port on first request.
			if (target_j->j_port == MACH_PORT_NULL) {
				(void)job_assumes(target_j, job_setup_machport(target_j) == true);
			}

			_mp = target_j->j_port;
			kr = _mp != MACH_PORT_NULL ? BOOTSTRAP_SUCCESS : BOOTSTRAP_NO_MEMORY;
		} else {
			kr = BOOTSTRAP_NO_MEMORY;
		}
	}

	// On any failure path _mp is still MACH_PORT_NULL.
	*mp = _mp;
	return kr;
}
/* MiG server routine: hand the audit-session port `asport` to every job that
 * is parked on s_needing_sessions waiting for the session identified by
 * `uuid`, then dispatch (or ping) those jobs.
 *
 * Send-right accounting: one copy of the send right is made per matching job
 * (each job holds its own reference in ji->asport); the reference that rode
 * in on this MiG request is released at the end so the session can die once
 * all referencing jobs are unloaded.
 */
kern_return_t
job_mig_set_security_session(job_t j, uuid_t uuid, mach_port_t asport)
{
#if TARGET_OS_EMBEDDED
	// No audit sessions on embedded targets; accept and ignore.
	return KERN_SUCCESS;
#endif

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	uuid_string_t uuid_str;
	uuid_unparse(uuid, uuid_str);
	job_log(j, LOG_DEBUG, "Setting session %u for UUID %s...", asport, uuid_str);

	job_t ji = NULL, jt = NULL;
	// Safe iteration: matching entries are removed from the list inside the loop.
	LIST_FOREACH_SAFE(ji, &s_needing_sessions, sle, jt) {
		// NOTE(review): uuid_str2 is computed but never used — looks like a
		// debugging leftover; confirm before removing.
		uuid_string_t uuid_str2;
		uuid_unparse(ji->expected_audit_uuid, uuid_str2);

		if (uuid_compare(uuid, ji->expected_audit_uuid) == 0) {
			uuid_clear(ji->expected_audit_uuid);
			if (asport != MACH_PORT_NULL) {
				job_log(ji, LOG_DEBUG, "Job should join session with port 0x%x", asport);
				// Each matched job gets its own send-right reference.
				(void)job_assumes_zero(j, launchd_mport_copy_send(asport));
			} else {
				job_log(ji, LOG_DEBUG, "No session to set for job. Using our session.");
			}

			ji->asport = asport;
			LIST_REMOVE(ji, needing_session_sle);

			// The event monitor is only pinged, not dispatched like normal jobs.
			if (ji->event_monitor) {
				eventsystem_ping();
			} else {
				job_dispatch(ji, false);
			}
		}
	}

	/* Each job that the session port was set for holds a reference. At the end of
	 * the loop, there will be one extra reference belonging to this MiG protocol.
	 * We need to release it so that the session goes away when all the jobs
	 * referencing it are unloaded.
	 */
	(void)job_assumes_zero(j, launchd_mport_deallocate(asport));

	return KERN_SUCCESS;
}
jobmgr_t
9557
jobmgr_find_by_name(jobmgr_t jm, const char *where)
9558
{
9559
jobmgr_t jmi, jmi2;
9560
9561
// NULL is only passed for our custom API for LaunchServices. If that is the case, we do magic.
9562
if (where == NULL) {
9563
if (strcasecmp(jm->name, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
9564
where = VPROCMGR_SESSION_LOGINWINDOW;
9565
} else {
9566
where = VPROCMGR_SESSION_AQUA;
9567
}
9568
}
9569
9570
if (strcasecmp(jm->name, where) == 0) {
9571
return jm;
9572
}
9573
9574
if (strcasecmp(where, VPROCMGR_SESSION_BACKGROUND) == 0 && !pid1_magic) {
9575
jmi = root_jobmgr;
9576
goto jm_found;
9577
}
9578
9579
SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
9580
if (unlikely(jmi->shutting_down)) {
9581
continue;
9582
} else if (jmi->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) {
9583
continue;
9584
} else if (strcasecmp(jmi->name, where) == 0) {
9585
goto jm_found;
9586
} else if (strcasecmp(jmi->name, VPROCMGR_SESSION_BACKGROUND) == 0 && pid1_magic) {
9587
SLIST_FOREACH(jmi2, &jmi->submgrs, sle) {
9588
if (strcasecmp(jmi2->name, where) == 0) {
9589
jmi = jmi2;
9590
goto jm_found;
9591
}
9592
}
9593
}
9594
}
9595
9596
jm_found:
9597
return jmi;
9598
}
9599
9600
/* MiG server routine: absorb a Mach sub-bootstrap owned by another launchd
 * into this one. Grabs the subset's request/receive rights and the list of
 * its registered Mach services via _vproc_grab_subset(), creates a new job
 * manager for them under j->mgr, and re-registers every service.
 *
 * On success the send rights for target_subset and asport that rode in on
 * this request are released; on failure a partially-constructed job manager
 * is shut down.
 */
kern_return_t
job_mig_move_subset(job_t j, mach_port_t target_subset, name_t session_type, mach_port_t asport, uint64_t flags)
{
	mach_msg_type_number_t l2l_i, l2l_port_cnt = 0;
	mach_port_array_t l2l_ports = NULL;
	mach_port_t reqport, rcvright;
	kern_return_t kr = 1;
	launch_data_t out_obj_array = NULL;
	struct ldcred *ldc = runtime_get_caller_creds();
	jobmgr_t jmr = NULL;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	// If target_subset translates to one of our own managers, the caller is
	// trying to move a session into its current owner — refuse.
	if (job_mig_intran2(root_jobmgr, target_subset, ldc->pid)) {
		job_log(j, LOG_ERR, "Moving a session to ourself is bogus.");

		kr = BOOTSTRAP_NOT_PRIVILEGED;
		goto out;
	}

	job_log(j, LOG_DEBUG, "Move subset attempt: 0x%x", target_subset);

	// Pull the subset's ports and service descriptions from the other launchd.
	kr = _vproc_grab_subset(target_subset, &reqport, &rcvright, &out_obj_array, &l2l_ports, &l2l_port_cnt);
	if (job_assumes_zero(j, kr) != 0) {
		goto out;
	}

	// The descriptor array and the port array must be parallel.
	if (launch_data_array_get_count(out_obj_array) != l2l_port_cnt) {
		os_assert_zero(l2l_port_cnt);
	}

	if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, reqport, rcvright, false, session_type, false, asport)) != NULL)) {
		kr = BOOTSTRAP_NO_MEMORY;
		goto out;
	}

	if (strcmp(session_type, VPROCMGR_SESSION_AQUA) == 0) {
		jobmgr_log(jmr, LOG_NOTICE, "Registering new GUI session.");
		kr = vproc_mig_register_gui_session(inherited_bootstrap_port, asport);
		if (kr) {
			// Non-fatal: log and continue; kr is overwritten below on success.
			jobmgr_log(jmr, LOG_ERR, "Failed to register GUI session with PID 1: 0x%x/0x%x", inherited_bootstrap_port, kr);
		}
	}

	jmr->properties |= BOOTSTRAP_PROPERTY_MOVEDSUBSET;

	/* This is a hack. We should be doing this in jobmgr_new(), but since we're in the middle of
	 * processing an IPC request, we'll do this action before the new job manager can get any IPC
	 * requests. This serialization is guaranteed since we are single-threaded in that respect.
	 */
	if (flags & LAUNCH_GLOBAL_ON_DEMAND) {
		// This is so awful.
		// Remove the job from its current job manager.
		LIST_REMOVE(j, sle);
		LIST_REMOVE(j, pid_hash_sle);

		// Put the job into the target job manager.
		LIST_INSERT_HEAD(&jmr->jobs, j, sle);
		LIST_INSERT_HEAD(&jmr->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);

		j->mgr = jmr;
		job_set_global_on_demand(j, true);

		// Hold one runtime reference per migrating job (only once per job).
		if (!j->holds_ref) {
			job_log(j, LOG_PERF, "Job moved subset into: %s", j->mgr->name);
			j->holds_ref = true;
			runtime_add_ref();
		}
	}

	// Re-register each transferred Mach service under the new manager.
	for (l2l_i = 0; l2l_i < l2l_port_cnt; l2l_i++) {
		launch_data_t tmp, obj_at_idx;
		struct machservice *ms;
		job_t j_for_service;
		const char *serv_name;
		pid_t target_pid;
		bool serv_perpid;

		(void)job_assumes(j, obj_at_idx = launch_data_array_get_index(out_obj_array, l2l_i));
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PID));
		target_pid = (pid_t)launch_data_get_integer(tmp);
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_PERPID));
		serv_perpid = launch_data_get_bool(tmp);
		(void)job_assumes(j, tmp = launch_data_dict_lookup(obj_at_idx, TAKE_SUBSET_NAME));
		serv_name = launch_data_get_string(tmp);

		j_for_service = jobmgr_find_by_pid(jmr, target_pid, true);

		if (unlikely(!j_for_service)) {
			// The PID probably exited
			(void)job_assumes_zero(j, launchd_mport_deallocate(l2l_ports[l2l_i]));
			continue;
		}

		// machservice_new() takes ownership of the port right on success.
		if (likely(ms = machservice_new(j_for_service, serv_name, &l2l_ports[l2l_i], serv_perpid))) {
			job_log(j, LOG_DEBUG, "Importing %s into new bootstrap.", serv_name);
			machservice_request_notifications(ms);
		}
	}

	kr = 0;

out:
	if (out_obj_array) {
		launch_data_free(out_obj_array);
	}

	if (l2l_ports) {
		mig_deallocate((vm_address_t)l2l_ports, l2l_port_cnt * sizeof(l2l_ports[0]));
	}

	if (kr == 0) {
		// Success: release the rights that arrived with this MiG request.
		if (target_subset) {
			(void)job_assumes_zero(j, launchd_mport_deallocate(target_subset));
		}
		if (asport) {
			(void)job_assumes_zero(j, launchd_mport_deallocate(asport));
		}
	} else if (jmr) {
		// Failure after the manager was created: tear it down again.
		jobmgr_shutdown(jmr);
	}

	return kr;
}
/* MiG server routine: initialize the caller's job manager as a named session
 * (e.g. Aqua, LoginWindow) and attach the given audit-session port.
 */
kern_return_t
job_mig_init_session(job_t j, name_t session_type, mach_port_t asport)
{
	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	job_t j2;

	kern_return_t kr = BOOTSTRAP_NO_MEMORY;
	if (j->mgr->session_initialized) {
		// NOTE(review): kr is set to NOT_PRIVILEGED here but control falls
		// through to the re-initialization below, which can overwrite kr with
		// BOOTSTRAP_SUCCESS. Confirm whether an early return was intended.
		job_log(j, LOG_ERR, "Tried to initialize an already setup session!");
		kr = BOOTSTRAP_NOT_PRIVILEGED;
	} else if (strcmp(session_type, VPROCMGR_SESSION_LOGINWINDOW) == 0) {
		jobmgr_t jmi;

		/*
		 * 5330262
		 *
		 * We're working around LoginWindow and the WindowServer.
		 *
		 * In practice, there is only one LoginWindow session. Unfortunately, for certain
		 * scenarios, the WindowServer spawns loginwindow, and in those cases, it frequently
		 * spawns a replacement loginwindow session before cleaning up the previous one.
		 *
		 * We're going to use the creation of a new LoginWindow context as a clue that the
		 * previous LoginWindow context is on the way out and therefore we should just
		 * kick-start the shutdown of it.
		 */

		SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
			if (unlikely(jmi->shutting_down)) {
				continue;
			} else if (strcasecmp(jmi->name, session_type) == 0) {
				jobmgr_shutdown(jmi);
				break;
			}
		}
	} else if (strcmp(session_type, VPROCMGR_SESSION_AQUA) == 0) {
		// The Aqua session's manager port is no longer served by us directly.
		(void)job_assumes_zero(j, runtime_remove_mport(j->mgr->jm_port));
	}

	jobmgr_log(j->mgr, LOG_DEBUG, "Initializing as %s", session_type);
	// session_type is a fixed-size name_t, as is name_init, so the unbounded
	// copy is bounded by the MiG type — still worth confirming.
	strcpy(j->mgr->name_init, session_type);

	if (job_assumes(j, (j2 = jobmgr_init_session(j->mgr, session_type, false)))) {
		j2->asport = asport;
		(void)job_assumes(j, job_dispatch(j2, true));
		kr = BOOTSTRAP_SUCCESS;
	}

	return kr;
}
/* MiG server routine: move an anonymous job from its current session into the
 * session named `session_name`, creating that session if needed. Only allowed
 * outside the system (PID-1) bootstrap and only for anonymous jobs; managed
 * jobs must use LimitLoadToSessionType instead.
 *
 * On success *new_bsport is the target session's bootstrap port.
 */
kern_return_t
job_mig_switch_to_session(job_t j, mach_port_t requestor_port, name_t session_name, mach_port_t asport, mach_port_t *new_bsport)
{
	struct ldcred *ldc = runtime_get_caller_creds();
	if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
		jobmgr_log(root_jobmgr, LOG_ERR, "%s() called with NULL job: PID %d", __func__, ldc->pid);
		return BOOTSTRAP_NO_MEMORY;
	}

	if (j->mgr->shutting_down) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	job_log(j, LOG_DEBUG, "Job wants to move to %s session.", session_name);

	if (!job_assumes(j, pid1_magic == false)) {
		job_log(j, LOG_WARNING, "Switching sessions is not allowed in the system Mach bootstrap.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	if (!j->anonymous) {
		job_log(j, LOG_NOTICE, "Non-anonymous job tried to switch sessions. Please use LimitLoadToSessionType instead.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	jobmgr_t target_jm = jobmgr_find_by_name(root_jobmgr, session_name);
	if (target_jm == j->mgr) {
		// Already there: drop the rights that came with the request and return.
		job_log(j, LOG_DEBUG, "Job is already in its desired session (%s).", session_name);
		(void)job_assumes_zero(j, launchd_mport_deallocate(asport));
		(void)job_assumes_zero(j, launchd_mport_deallocate(requestor_port));
		*new_bsport = target_jm->jm_port;
		return BOOTSTRAP_SUCCESS;
	}

	if (!target_jm) {
		// Session doesn't exist yet: create it as an implicit subset.
		// jobmgr_new() takes its own reference on asport, so release ours.
		target_jm = jobmgr_new(j->mgr, requestor_port, MACH_PORT_NULL, false, session_name, false, asport);
		if (target_jm) {
			target_jm->properties |= BOOTSTRAP_PROPERTY_IMPLICITSUBSET;
			(void)job_assumes_zero(j, launchd_mport_deallocate(asport));
		}
	}

	if (!job_assumes(j, target_jm != NULL)) {
		job_log(j, LOG_WARNING, "Could not find %s session!", session_name);
		return BOOTSTRAP_NO_MEMORY;
	}

	// Remove the job from it's current job manager.
	LIST_REMOVE(j, sle);
	LIST_REMOVE(j, pid_hash_sle);

	// If the job is on the old manager's global-environment list, unlink it
	// (ji remembers whether it was, so we can relink below).
	job_t ji = NULL, jit = NULL;
	LIST_FOREACH_SAFE(ji, &j->mgr->global_env_jobs, global_env_sle, jit) {
		if (ji == j) {
			LIST_REMOVE(ji, global_env_sle);
			break;
		}
	}

	// Put the job into the target job manager.
	LIST_INSERT_HEAD(&target_jm->jobs, j, sle);
	LIST_INSERT_HEAD(&target_jm->active_jobs[ACTIVE_JOB_HASH(j->p)], j, pid_hash_sle);

	if (ji) {
		LIST_INSERT_HEAD(&target_jm->global_env_jobs, j, global_env_sle);
	}

	// Move our Mach services over if we're not in a flat namespace.
	if (!launchd_flat_mach_namespace && !SLIST_EMPTY(&j->machservices)) {
		struct machservice *msi = NULL, *msit = NULL;
		SLIST_FOREACH_SAFE(msi, &j->machservices, sle, msit) {
			LIST_REMOVE(msi, name_hash_sle);
			LIST_INSERT_HEAD(&target_jm->ms_hash[hash_ms(msi->name)], msi, name_hash_sle);
		}
	}

	j->mgr = target_jm;

	if (!j->holds_ref) {
		/* Anonymous jobs which move around are particularly interesting to us, so we want to
		 * stick around while they're still around.
		 * For example, login calls into the PAM launchd module, which moves the process into
		 * the StandardIO session by default. So we'll hold a reference on that job to prevent
		 * ourselves from going away.
		 */
		j->holds_ref = true;
		job_log(j, LOG_PERF, "Job switched into manager: %s", j->mgr->name);
		runtime_add_ref();
	}

	*new_bsport = target_jm->jm_port;

	return KERN_SUCCESS;
}
kern_return_t
9877
job_mig_take_subset(job_t j, mach_port_t *reqport, mach_port_t *rcvright,
9878
vm_offset_t *outdata, mach_msg_type_number_t *outdataCnt,
9879
mach_port_array_t *portsp, unsigned int *ports_cnt)
9880
{
9881
launch_data_t tmp_obj, tmp_dict, outdata_obj_array = NULL;
9882
mach_port_array_t ports = NULL;
9883
unsigned int cnt = 0, cnt2 = 0;
9884
size_t packed_size;
9885
struct machservice *ms;
9886
jobmgr_t jm;
9887
job_t ji;
9888
9889
if (!j) {
9890
return BOOTSTRAP_NO_MEMORY;
9891
}
9892
9893
jm = j->mgr;
9894
9895
if (unlikely(!pid1_magic)) {
9896
job_log(j, LOG_ERR, "Only the system launchd will transfer Mach sub-bootstraps.");
9897
return BOOTSTRAP_NOT_PRIVILEGED;
9898
}
9899
if (unlikely(jobmgr_parent(jm) == NULL)) {
9900
job_log(j, LOG_ERR, "Root Mach bootstrap cannot be transferred.");
9901
return BOOTSTRAP_NOT_PRIVILEGED;
9902
}
9903
if (unlikely(strcasecmp(jm->name, VPROCMGR_SESSION_AQUA) == 0)) {
9904
job_log(j, LOG_ERR, "Cannot transfer a setup GUI session.");
9905
return BOOTSTRAP_NOT_PRIVILEGED;
9906
}
9907
if (unlikely(!j->anonymous)) {
9908
job_log(j, LOG_ERR, "Only the anonymous job can transfer Mach sub-bootstraps.");
9909
return BOOTSTRAP_NOT_PRIVILEGED;
9910
}
9911
9912
job_log(j, LOG_DEBUG, "Transferring sub-bootstrap to the per session launchd.");
9913
9914
outdata_obj_array = launch_data_alloc(LAUNCH_DATA_ARRAY);
9915
if (!job_assumes(j, outdata_obj_array)) {
9916
goto out_bad;
9917
}
9918
9919
*outdataCnt = 20 * 1024 * 1024;
9920
mig_allocate(outdata, *outdataCnt);
9921
if (!job_assumes(j, *outdata != 0)) {
9922
return 1;
9923
}
9924
9925
LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9926
if (!ji->anonymous) {
9927
continue;
9928
}
9929
SLIST_FOREACH(ms, &ji->machservices, sle) {
9930
cnt++;
9931
}
9932
}
9933
9934
mig_allocate((vm_address_t *)&ports, cnt * sizeof(ports[0]));
9935
if (!job_assumes(j, ports != NULL)) {
9936
goto out_bad;
9937
}
9938
9939
LIST_FOREACH(ji, &j->mgr->jobs, sle) {
9940
if (!ji->anonymous) {
9941
continue;
9942
}
9943
9944
SLIST_FOREACH(ms, &ji->machservices, sle) {
9945
if (job_assumes(j, (tmp_dict = launch_data_alloc(LAUNCH_DATA_DICTIONARY)))) {
9946
(void)job_assumes(j, launch_data_array_set_index(outdata_obj_array, tmp_dict, cnt2));
9947
} else {
9948
goto out_bad;
9949
}
9950
9951
if (job_assumes(j, (tmp_obj = launch_data_new_string(machservice_name(ms))))) {
9952
(void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_NAME));
9953
} else {
9954
goto out_bad;
9955
}
9956
9957
if (job_assumes(j, (tmp_obj = launch_data_new_integer((ms->job->p))))) {
9958
(void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PID));
9959
} else {
9960
goto out_bad;
9961
}
9962
9963
if (job_assumes(j, (tmp_obj = launch_data_new_bool((ms->per_pid))))) {
9964
(void)job_assumes(j, launch_data_dict_insert(tmp_dict, tmp_obj, TAKE_SUBSET_PERPID));
9965
} else {
9966
goto out_bad;
9967
}
9968
9969
ports[cnt2] = machservice_port(ms);
9970
9971
// Increment the send right by one so we can shutdown the jobmgr cleanly
9972
(void)jobmgr_assumes_zero(jm, launchd_mport_copy_send(ports[cnt2]));
9973
cnt2++;
9974
}
9975
}
9976
9977
(void)job_assumes(j, cnt == cnt2);
9978
9979
runtime_ktrace0(RTKT_LAUNCHD_DATA_PACK);
9980
packed_size = launch_data_pack(outdata_obj_array, (void *)*outdata, *outdataCnt, NULL, NULL);
9981
if (!job_assumes(j, packed_size != 0)) {
9982
goto out_bad;
9983
}
9984
9985
launch_data_free(outdata_obj_array);
9986
9987
*portsp = ports;
9988
*ports_cnt = cnt;
9989
9990
*reqport = jm->req_port;
9991
*rcvright = jm->jm_port;
9992
9993
jm->req_port = 0;
9994
jm->jm_port = 0;
9995
9996
workaround_5477111 = j;
9997
9998
jobmgr_shutdown(jm);
9999
10000
return BOOTSTRAP_SUCCESS;
10001
10002
out_bad:
10003
if (outdata_obj_array) {
10004
launch_data_free(outdata_obj_array);
10005
}
10006
if (*outdata) {
10007
mig_deallocate(*outdata, *outdataCnt);
10008
}
10009
if (ports) {
10010
mig_deallocate((vm_address_t)ports, cnt * sizeof(ports[0]));
10011
}
10012
10013
return BOOTSTRAP_NO_MEMORY;
10014
}
10015
10016
/* MiG server routine: create a Mach sub-bootstrap (subset) under the
 * caller's job manager and return its port in *subsetportp.
 *
 * Depth is capped at 100 because manager traversal is recursive elsewhere.
 */
kern_return_t
job_mig_subset(job_t j, mach_port_t requestorport, mach_port_t *subsetportp)
{
	int bsdepth = 0;
	jobmgr_t jmr;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}
	if (j->mgr->shutting_down) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	jmr = j->mgr;

	// Walk up to the root to measure how deep this subset would be.
	while ((jmr = jobmgr_parent(jmr)) != NULL) {
		bsdepth++;
	}

	// Since we use recursion, we need an artificial depth for subsets
	if (unlikely(bsdepth > 100)) {
		job_log(j, LOG_ERR, "Mach sub-bootstrap create request failed. Depth greater than: %d", bsdepth);
		return BOOTSTRAP_NO_MEMORY;
	}

	// Name the subset after the creating job and the requestor port index.
	char name[NAME_MAX];
	snprintf(name, sizeof(name), "%s[%i].subset.%i", j->anonymous ? j->prog : j->label, j->p, MACH_PORT_INDEX(requestorport));

	if (!job_assumes(j, (jmr = jobmgr_new(j->mgr, requestorport, MACH_PORT_NULL, false, name, true, j->asport)) != NULL)) {
		// A NULL requestor port is the one failure mode that is the caller's fault.
		if (unlikely(requestorport == MACH_PORT_NULL)) {
			return BOOTSTRAP_NOT_PRIVILEGED;
		}
		return BOOTSTRAP_NO_MEMORY;
	}

	*subsetportp = jmr->jm_port;
	jmr->properties |= BOOTSTRAP_PROPERTY_EXPLICITSUBSET;

	/* A job could create multiple subsets, so only add a reference the first time
	 * it does so we don't have to keep a count.
	 */
	if (j->anonymous && !j->holds_ref) {
		job_log(j, LOG_PERF, "Job created subset: %s", jmr->name);
		j->holds_ref = true;
		runtime_add_ref();
	}

	job_log(j, LOG_DEBUG, "Job created a subset named \"%s\"", jmr->name);
	return BOOTSTRAP_SUCCESS;
}
/* Import one service plist (`pload`) into the appropriate XPC domain.
 *
 * The destination is chosen by the service's XPCDomain key: the System
 * singleton, a per-user domain, a per-session domain, or — when the key is
 * absent — the requesting domain `jm` itself. Services imported into a
 * singleton domain are additionally aliased into `jm` to reserve the label
 * locally.
 *
 * Returns the imported (or aliased) job, or NULL with errno set on failure.
 */
job_t
_xpc_domain_import_service(jobmgr_t jm, launch_data_t pload)
{
	jobmgr_t where2put = NULL;

	if (launch_data_get_type(pload) != LAUNCH_DATA_DICTIONARY) {
		errno = EINVAL;
		return NULL;
	}

	launch_data_t ldlabel = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_LABEL);
	if (!ldlabel || launch_data_get_type(ldlabel) != LAUNCH_DATA_STRING) {
		errno = EINVAL;
		return NULL;
	}

	const char *label = launch_data_get_string(ldlabel);
	jobmgr_log(jm, LOG_DEBUG, "Importing service: %s", label);

	launch_data_t destname = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_XPCDOMAIN);
	if (destname) {
		// supported_domain marks destinations where multiple-instance
		// services are allowed (currently only the per-user domain).
		bool supported_domain = false;

		if (launch_data_get_type(destname) == LAUNCH_DATA_STRING) {
			const char *str = launch_data_get_string(destname);
			if (strcmp(str, XPC_DOMAIN_TYPE_SYSTEM) == 0) {
				where2put = _s_xpc_system_domain;
			} else if (strcmp(str, XPC_DOMAIN_TYPE_PERUSER) == 0) {
				where2put = jobmgr_find_xpc_per_user_domain(jm, jm->req_euid);
				supported_domain = true;
			} else if (strcmp(str, XPC_DOMAIN_TYPE_PERSESSION) == 0) {
				where2put = jobmgr_find_xpc_per_session_domain(jm, jm->req_asid);
			} else {
				jobmgr_log(jm, LOG_ERR, "Invalid XPC domain type: %s", str);
				errno = EINVAL;
			}
		} else {
			jobmgr_log(jm, LOG_ERR, "XPC domain type is not a string.");
			errno = EINVAL;
		}

		if (where2put && !supported_domain) {
			launch_data_t mi = NULL;
			if ((mi = launch_data_dict_lookup(pload, LAUNCH_JOBKEY_MULTIPLEINSTANCES))) {
				if (launch_data_get_type(mi) == LAUNCH_DATA_BOOL && launch_data_get_bool(mi)) {
					jobmgr_log(where2put, LOG_ERR, "Multiple-instance services are not supported in this domain.");
					where2put = NULL;
					errno = EINVAL;
				}
			}
		}
	} else {
		// No XPCDomain key: the service lives in the requesting domain.
		where2put = jm;
	}

	job_t j = NULL;
	if (where2put) {
		/* Gross. If the service already exists in a singleton domain, then
		 * jobmgr_import2() will return the existing job. But if we fail to alias
		 * this job, we will normally want to remove it. But if we did not create
		 * it in the first place, then we need to avoid removing it. So check
		 * errno against EEXIST in the success case and if it's EEXIST, then do
		 * not remove the original job in the event of a failed alias.
		 *
		 * This really needs to be re-thought, but I think it'll require a larger
		 * evaluation of launchd's data structures. Right now, once a job is
		 * imported into a singleton domain, it won't be removed until the system
		 * shuts down, but that may not always be true. If it ever changes, we'll
		 * have a problem because we'll have to account for all existing aliases
		 * and clean them up somehow. Or just start ref-counting. I knew this
		 * aliasing stuff would be trouble...
		 *
		 * <rdar://problem/10646503>
		 */
		jobmgr_log(where2put, LOG_DEBUG, "Importing service...");

		errno = 0;
		if ((j = jobmgr_import2(where2put, pload))) {
			// EEXIST means jobmgr_import2() handed back a pre-existing job.
			bool created = (errno != EEXIST);
			j->xpc_service = true;

			if (where2put->xpc_singleton) {
				/* If the service was destined for one of the global domains,
				 * then we have to alias it into our local domain to reserve the
				 * name.
				 */
				job_t ja = NULL;
				if (!(ja = job_new_alias(jm, j))) {
					/* If we failed to alias the job because of a conflict over
					 * the label, then we remove it from the global domain. We
					 * don't want to risk having imported a malicious job into
					 * one of the global domains.
					 */
					if (errno != EEXIST) {
						job_log(j, LOG_ERR, "Failed to alias job into: %s: %d: %s", where2put->name, errno, strerror(errno));
					} else {
						errno = 0;
					}

					if (created) {
						jobmgr_log(jm, LOG_WARNING, "Singleton service already existed in job-local namespace. Removing: %s", j->label);
						job_remove(j);
					}

					j = NULL;
				} else {
					jobmgr_log(jm, LOG_DEBUG, "Aliased service into local domain: %s", j->label);
					(void)job_dispatch(j, false);
					ja->xpc_service = true;
					// Hand the alias back to the caller, not the singleton job.
					j = ja;
				}
			} else {
				(void)job_dispatch(j, false);
			}
		}
	} else {
		jobmgr_log(jm, LOG_DEBUG, "Could not find destination for service: %s", label);
	}

	return j;
}
int
10190
_xpc_domain_import_services(job_t j, launch_data_t services)
10191
{
10192
int error = EINVAL;
10193
if (launch_data_get_type(services) != LAUNCH_DATA_ARRAY) {
10194
return error;
10195
}
10196
10197
size_t i = 0;
10198
size_t c = launch_data_array_get_count(services);
10199
jobmgr_log(j->mgr, LOG_DEBUG, "Importing new services: %lu", c);
10200
10201
for (i = 0; i < c; i++) {
10202
jobmgr_log(j->mgr, LOG_DEBUG, "Importing service at index: %lu", i);
10203
10204
job_t nj = NULL;
10205
launch_data_t ploadi = launch_data_array_get_index(services, i);
10206
if (!(nj = _xpc_domain_import_service(j->mgr, ploadi))) {
10207
if (!j->mgr->session_initialized && errno) {
10208
/* Service import failures are only fatal if the domain is being
10209
* initialized. If we're extending the domain, we can run into
10210
* errors with services already existing, so we just ignore them.
10211
* In the case of a domain extension, we don't want to halt the
10212
* operation if we run into an error with one service.
10213
*
10214
* <rdar://problem/10842779>
10215
*/
10216
jobmgr_log(j->mgr, LOG_ERR, "Failed to import service at index: %lu: %d: %s", i, errno, strerror(errno));
10217
error = errno;
10218
break;
10219
}
10220
} else {
10221
jobmgr_log(j->mgr, LOG_DEBUG, "Imported service: %s", nj->label);
10222
}
10223
}
10224
10225
if (i == c) {
10226
error = 0;
10227
}
10228
10229
return error;
10230
}
10231
10232
kern_return_t
10233
xpc_domain_import2(job_t j, mach_port_t reqport, mach_port_t dport)
10234
{
10235
if (unlikely(!pid1_magic)) {
10236
job_log(j, LOG_ERR, "XPC domains may only reside in PID 1.");
10237
return BOOTSTRAP_NOT_PRIVILEGED;
10238
}
10239
if (!j || !MACH_PORT_VALID(reqport)) {
10240
return BOOTSTRAP_UNKNOWN_SERVICE;
10241
}
10242
if (root_jobmgr->shutting_down) {
10243
jobmgr_log(root_jobmgr, LOG_ERR, "Attempt to create new domain while shutting down.");
10244
return BOOTSTRAP_NOT_PRIVILEGED;
10245
}
10246
if (!j->xpc_bootstrapper) {
10247
job_log(j, LOG_ERR, "Attempt to create new XPC domain by unprivileged job.");
10248
return BOOTSTRAP_NOT_PRIVILEGED;
10249
}
10250
10251
kern_return_t kr = BOOTSTRAP_NO_MEMORY;
10252
/* All XPC domains are children of the root job manager. What we're creating
10253
* here is really just a skeleton. By creating it, we're adding reqp to our
10254
* port set. It will have two messages on it. The first specifies the
10255
* environment of the originator. This is so we can cache it and hand it to
10256
* xpcproxy to bootstrap our services. The second is the set of jobs that is
10257
* to be bootstrapped in.
10258
*/
10259
jobmgr_t jm = jobmgr_new(root_jobmgr, reqport, dport, false, NULL, true, MACH_PORT_NULL);
10260
if (job_assumes(j, jm != NULL)) {
10261
jm->properties |= BOOTSTRAP_PROPERTY_XPC_DOMAIN;
10262
jm->shortdesc = "private";
10263
kr = BOOTSTRAP_SUCCESS;
10264
}
10265
10266
return kr;
10267
}
10268
10269
/* MiG server routine: record the originating process's environment (ports,
 * credentials, audit session, context blob) on a freshly-created XPC domain.
 * This is the first of the two follow-up messages promised by
 * xpc_domain_import2(); it may only be delivered once (req_asport guards).
 */
kern_return_t
xpc_domain_set_environment(job_t j, mach_port_t rp, mach_port_t bsport, mach_port_t excport, vm_offset_t ctx, mach_msg_type_number_t ctx_sz)
{
	if (!j) {
		/* Due to the whacky nature of XPC service bootstrapping, we can end up
		 * getting this message long after the requesting process has gone away.
		 * See <rdar://problem/8593143>.
		 */
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	jobmgr_t jm = j->mgr;
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// A non-null req_asport means the environment was already set — refuse a second time.
	if (jm->req_asport != MACH_PORT_NULL) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	struct ldcred *ldc = runtime_get_caller_creds();
	struct proc_bsdinfowithuniqid proc;
	if (proc_pidinfo(ldc->pid, PROC_PIDT_BSDINFOWITHUNIQID, 1, &proc, PROC_PIDT_BSDINFOWITHUNIQID_SIZE) == 0) {
		// Requestor vanished (or lookup failed): the skeleton domain is useless.
		if (errno != ESRCH) {
			(void)jobmgr_assumes_zero(jm, errno);
		}

		jm->error = errno;
		jobmgr_remove(jm);
		return BOOTSTRAP_NO_MEMORY;
	}

#if !TARGET_OS_EMBEDDED
	if (jobmgr_assumes_zero(jm, audit_session_port(ldc->asid, &jm->req_asport)) != 0) {
		jm->error = EPERM;
		jobmgr_remove(jm);
		job_log(j, LOG_ERR, "Failed to get port for ASID: %u", ldc->asid);
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#else
	// No audit sessions on embedded; use a sentinel so the set-once guard works.
	jm->req_asport = MACH_PORT_DEAD;
#endif

	// Adopt any pending debugger-attach waiters aimed at the requesting PID.
	struct waiting4attach *w4ai = NULL;
	struct waiting4attach *w4ait = NULL;
	LIST_FOREACH_SAFE(w4ai, &_launchd_domain_waiters, le, w4ait) {
		if (w4ai->dest == ldc->pid) {
			jobmgr_log(jm, LOG_DEBUG, "Migrating attach for: %s", w4ai->name);
			LIST_REMOVE(w4ai, le);
			LIST_INSERT_HEAD(&jm->attaches, w4ai, le);
			w4ai->dest = 0;
		}
	}

	// Cache the requestor's identity and context for later check-in.
	(void)snprintf(jm->name_init, NAME_MAX, "com.apple.xpc.domain.%s.%d", proc.pbsd.pbi_comm, ldc->pid);
	strlcpy(jm->owner, proc.pbsd.pbi_comm, sizeof(jm->owner));
	jm->req_bsport = bsport;
	jm->req_excport = excport;
	jm->req_rport = rp;
	jm->req_ctx = ctx;
	jm->req_ctx_sz = ctx_sz;
	jm->req_pid = ldc->pid;
	jm->req_euid = ldc->euid;
	jm->req_egid = ldc->egid;
	jm->req_asid = ldc->asid;
	jm->req_uniqueid = proc.p_uniqidentifier.p_uniqueid;

	return KERN_SUCCESS;
}
/* MiG server routine: unpack the serialized service list and populate an XPC
 * domain with it — the second follow-up message after xpc_domain_import2().
 * Only the XPC bootstrapper job may deliver it, and only once per domain.
 * On import failure the whole domain is destroyed.
 */
kern_return_t
xpc_domain_load_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
{
	if (!j) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	// The privilege check keys off the caller's PID in the root namespace,
	// not the domain-local job handle.
	job_t rootj = jobmgr_find_by_pid(root_jobmgr, j->p, false);
	if (!(rootj && rootj->xpc_bootstrapper)) {
		job_log(j, LOG_ERR, "Attempt to load services into XPC domain by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// This is just for XPC domains (for now).
	if (!(j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
	if (j->mgr->session_initialized) {
		jobmgr_log(j->mgr, LOG_ERR, "Attempt to initialize an already-initialized XPC domain.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	size_t offset = 0;
	launch_data_t services = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &offset, NULL);
	if (!services) {
		return BOOTSTRAP_NO_MEMORY;
	}

	int error = _xpc_domain_import_services(j, services);
	if (error) {
		// Import failure during initialization is fatal to the whole domain.
		j->mgr->error = error;
		jobmgr_log(j->mgr, LOG_ERR, "Obliterating domain.");
		jobmgr_remove(j->mgr);
	} else {
		j->mgr->session_initialized = true;
		// Wake the requestor blocked in xpc_domain_import2's reply path.
		(void)jobmgr_assumes_zero(j->mgr, xpc_call_wakeup(j->mgr->req_rport, BOOTSTRAP_SUCCESS));
		j->mgr->req_rport = MACH_PORT_NULL;

		/* Returning a failure code will destroy the message, whereas returning
		 * success will not, so we need to clean up here.
		 */
		mig_deallocate(services_buff, services_sz);
		error = BOOTSTRAP_SUCCESS;
	}

	return error;
}
/* MIG routine: hand an XPC domain's requestor state back to the process
 * checking in to that domain.
 *
 * Out-parameters are filled from the job manager's req_* fields captured at
 * domain-creation time: bootstrap port, root (system) bootstrap port,
 * exception port, audit session port, effective uid/gid, audit session id,
 * and the opaque creation context blob.
 *
 * Returns KERN_SUCCESS, or BOOTSTRAP_* on a bad caller/manager.
 */
kern_return_t
xpc_domain_check_in(job_t j, mach_port_t *bsport, mach_port_t *sbsport,
	mach_port_t *excport, mach_port_t *asport, uint32_t *uid, uint32_t *gid,
	int32_t *asid, vm_offset_t *ctx, mach_msg_type_number_t *ctx_sz)
{
	if (!jobmgr_assumes(root_jobmgr, j != NULL)) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}
	jobmgr_t jm = j->mgr;
	// Check-in is only meaningful for XPC domains.
	if (!(jm->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// No audit session port means the domain was never properly set up.
	if (jm->req_asport == MACH_PORT_NULL) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	*bsport = jm->req_bsport;
	*sbsport = root_jobmgr->jm_port;
	*excport = jm->req_excport;
	if (j->joins_gui_session) {
		// Jobs that join the GUI session get the GUI audit-session port when
		// one exists; otherwise fall back to the requestor's session.
		if (jm->req_gui_asport) {
			*asport = jm->req_gui_asport;
		} else {
			job_log(j, LOG_NOTICE, "No GUI session set for UID of user service. This service may not act properly.");
			*asport = jm->req_asport;
		}
	} else {
		*asport = jm->req_asport;
	}

	*uid = jm->req_euid;
	*gid = jm->req_egid;
	*asid = jm->req_asid;

	*ctx = jm->req_ctx;
	*ctx_sz = jm->req_ctx_sz;

	return KERN_SUCCESS;
}
10427
10428
kern_return_t
10429
xpc_domain_get_service_name(job_t j, event_name_t name)
10430
{
10431
if (!j) {
10432
return BOOTSTRAP_NO_MEMORY;
10433
}
10434
10435
if (!j->xpc_service) {
10436
jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name by non-XPC service: %s", j->label);
10437
return BOOTSTRAP_NOT_PRIVILEGED;
10438
}
10439
10440
const char *what2find = j->label;
10441
if (j->dedicated_instance) {
10442
what2find = j->original->label;
10443
}
10444
10445
struct machservice *msi = NULL;
10446
SLIST_FOREACH(msi, &j->machservices, sle) {
10447
if (strcmp(msi->name, what2find) == 0) {
10448
break;
10449
}
10450
}
10451
10452
if (!msi) {
10453
jobmgr_log(j->mgr, LOG_ERR, "Attempt to get service name that does not exist: %s", j->label);
10454
return BOOTSTRAP_UNKNOWN_SERVICE;
10455
}
10456
10457
(void)strlcpy(name, msi->name, sizeof(event_name_t));
10458
return BOOTSTRAP_SUCCESS;
10459
}
10460
10461
#if XPC_LPI_VERSION >= 20111216
/* MIG routine: add additional service definitions to an existing XPC
 * domain. Unlike xpc_domain_load_services(), this may be called after the
 * domain has been initialized. Only the domain's bootstrapper may call it.
 * On success the out-of-line buffer is deallocated here; on failure MIG
 * destroys the message (and the buffer) for us.
 */
kern_return_t
xpc_domain_add_services(job_t j, vm_offset_t services_buff, mach_msg_type_number_t services_sz)
{
	if (j == NULL) {
		return BOOTSTRAP_UNKNOWN_SERVICE;
	}

	// Only the job that bootstrapped this domain may add services.
	job_t bootstrapper = jobmgr_find_by_pid(root_jobmgr, j->p, false);
	if (bootstrapper == NULL || !bootstrapper->xpc_bootstrapper) {
		job_log(j, LOG_ERR, "Attempt to add service to XPC domain by unprivileged job.");
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	// Only XPC domains accept this request.
	if ((j->mgr->properties & BOOTSTRAP_PROPERTY_XPC_DOMAIN) == 0) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

	size_t unpack_offset = 0;
	launch_data_t svcs = launch_data_unpack((void *)services_buff, services_sz, NULL, 0, &unpack_offset, NULL);
	if (svcs == NULL) {
		return BOOTSTRAP_NO_MEMORY;
	}

	int result = _xpc_domain_import_services(j, svcs);
	if (result == 0) {
		// Returning success keeps MIG from destroying the message, so the
		// OOL buffer must be released manually.
		mig_deallocate(services_buff, services_sz);
	}

	return result;
}
#endif
10493
10494
#pragma mark XPC Events
10495
/* Find (or lazily create) the event-channel MachService named `stream` on
 * job `j`, returning it through `ms`.
 *
 * Returns 0 on success, EXNOMEM if a new service could not be allocated,
 * or EEXIST if the job already registered an ordinary (non-event-channel)
 * MachService under the same name.
 */
int
xpc_event_find_channel(job_t j, const char *stream, struct machservice **ms)
{
	int error = EXNOMEM;
	struct machservice *msi = NULL;
	SLIST_FOREACH(msi, &j->machservices, sle) {
		if (strcmp(stream, msi->name) == 0) {
			break;
		}
	}

	if (!msi) {
		// No existing channel for this stream; create one on demand.
		mach_port_t sp = MACH_PORT_NULL;
		msi = machservice_new(j, stream, &sp, false);
		if (!msi) {
			return EXNOMEM;
		}

		job_log(j, LOG_DEBUG, "Creating new MachService for stream: %s", stream);
		/* Hack to keep this from being publicly accessible through
		 * bootstrap_look_up().
		 */
		if (!j->dedicated_instance) {
			LIST_REMOVE(msi, name_hash_sle);
		}
		msi->event_channel = true;

		/* If we call job_dispatch() here before the audit session for the job
		 * has been set, we'll end up not watching this service. But we also have
		 * to take care not to watch the port if the job is active.
		 *
		 * See <rdar://problem/10357855>.
		 */
		if (!j->currently_ignored) {
			machservice_watch(j, msi);
		}

		error = 0;
		*ms = msi;
	} else if (!msi->event_channel) {
		// Name collision with a regular MachService: refuse rather than
		// silently hijack the existing registration.
		job_log(j, LOG_ERR, "This job registered a MachService name identical to the requested event channel name: %s", stream);
		error = EEXIST;
	} else {
		// Existing event channel: reuse it.
		error = 0;
		*ms = msi;
	}

	return error;
}
10544
10545
int
10546
xpc_event_get_event_name(job_t j, xpc_object_t request, xpc_object_t *reply)
10547
{
10548
const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10549
if (!stream) {
10550
return EXINVAL;
10551
}
10552
10553
uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10554
if (!token) {
10555
return EXINVAL;
10556
}
10557
10558
job_log(j, LOG_DEBUG, "Getting event name for stream/token: %s/0x%llu", stream, token);
10559
10560
int result = ESRCH;
10561
struct externalevent *event = externalevent_find(stream, token);
10562
if (event && j->event_monitor) {
10563
xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10564
xpc_dictionary_set_string(reply2, XPC_EVENT_ROUTINE_KEY_NAME, event->name);
10565
*reply = reply2;
10566
10567
job_log(j, LOG_DEBUG, "Found: %s", event->name);
10568
result = 0;
10569
}
10570
10571
return result;
10572
}
10573
10574
int
10575
xpc_event_copy_entitlements(job_t j, xpc_object_t request, xpc_object_t *reply)
10576
{
10577
const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10578
if (!stream) {
10579
return EXINVAL;
10580
}
10581
10582
uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10583
if (!token) {
10584
return EXINVAL;
10585
}
10586
10587
job_log(j, LOG_DEBUG, "Getting entitlements for stream/token: %s/0x%llu", stream, token);
10588
10589
int result = ESRCH;
10590
struct externalevent *event = externalevent_find(stream, token);
10591
if (event && j->event_monitor) {
10592
xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10593
xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_ENTITLEMENTS, event->entitlements);
10594
*reply = reply2;
10595
10596
job_log(j, LOG_DEBUG, "Found: %s", event->name);
10597
result = 0;
10598
}
10599
10600
return result;
10601
}
10602
10603
// TODO - can be removed with rdar://problem/12666150
#ifndef XPC_EVENT_FLAG_ALLOW_UNMANAGED
// Fallback for SDKs whose libxpc headers do not yet export this flag; the
// value must stay in sync with libxpc's definition.
#define XPC_EVENT_FLAG_ALLOW_UNMANAGED (1 << 1)
#endif
10607
10608
/* XPC event routine: set or remove the event registered by job `j` under
 * (stream, key). A NULL/absent event dictionary means "remove". Any
 * existing event with the same stream/key is deleted first, so setting is
 * effectively replace-in-place.
 *
 * Returns 0 with *reply set on success, EXINVAL on a malformed request,
 * EPERM for unmanaged (anonymous) callers without the override flag, or
 * EXNOMEM if the stream/event could not be created.
 */
int
xpc_event_set_event(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	const char *key = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_NAME);
	if (!key) {
		return EXINVAL;
	}

	// Absent event => removal; if present it must be a dictionary.
	xpc_object_t event = xpc_dictionary_get_value(request, XPC_EVENT_ROUTINE_KEY_EVENT);
	if (event && xpc_get_type(event) != XPC_TYPE_DICTIONARY) {
		return EXINVAL;
	}

	uint64_t flags = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_FLAGS);

	/* Don't allow events to be set for anonymous jobs unless specifically
	 * requested in the flags. Only permit this for internal development.
	 */
	if (j->anonymous && ((flags & XPC_EVENT_FLAG_ALLOW_UNMANAGED) == 0 || !launchd_apple_internal)) {
		job_log(j, LOG_ERR, "Unmanaged jobs may not make XPC Events requests.");
		return EPERM;
	}

	job_log(j, LOG_DEBUG, "%s event for stream/key: %s/%s", event ? "Setting" : "Removing", stream, key);

	struct externalevent *eei = NULL;
	LIST_FOREACH(eei, &j->events, job_le) {
		/* If the event for the given key already exists for the job, we need to
		 * remove the old one first.
		 */
		if (strcmp(eei->name, key) == 0 && strcmp(eei->sys->name, stream) == 0) {
			job_log(j, LOG_DEBUG, "Event exists. Removing.");
			externalevent_delete(eei);
			// eei is freed by externalevent_delete(); break immediately so
			// the iteration never touches the dead node.
			break;
		}
	}

	int result = EXNOMEM;
	if (event) {
		// Streams are created lazily on first registration.
		struct eventsystem *es = eventsystem_find(stream);
		if (!es) {
			job_log(j, LOG_DEBUG, "Creating stream.");
			es = eventsystem_new(stream);
		}

		if (es) {
			job_log(j, LOG_DEBUG, "Adding event.");
			if (externalevent_new(j, es, key, event, flags)) {
				job_log(j, LOG_DEBUG, "Added new event for key: %s", key);
				result = 0;
			} else {
				job_log(j, LOG_ERR, "Could not create event for key: %s", key);
			}
		} else {
			job_log(j, LOG_ERR, "Event stream could not be created: %s", stream);
		}
	} else {
		/* If the event was NULL, then we just remove it and return. */
		result = 0;
	}

	if (result == 0) {
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		*reply = reply2;
	}

	return result;
}
10681
10682
/* XPC event routine: copy one event, all events in one stream, or all
 * events across all streams for job `j`, depending on which of stream/key
 * the request supplies:
 *   - stream + key:   reply carries that single event dictionary.
 *   - stream, no key: reply carries a dict of key -> event for that stream.
 *   - neither:        reply carries a dict of stream -> (key -> event).
 * A key without a stream is rejected (EXINVAL). Returns ESRCH when nothing
 * matched, 0 with *reply set otherwise.
 */
int
xpc_event_copy_event(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	const char *key = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_NAME);

	bool all_streams = (stream == NULL);
	bool all_events = (key == NULL || strcmp(key, "") == 0); // strcmp for libxpc compatibility
	xpc_object_t events = NULL;

	// A specific key only makes sense within a specific stream.
	if (all_streams && !all_events) {
		return EXINVAL;
	}

	if (all_streams || all_events) {
		job_log(j, LOG_DEBUG, "Fetching all events%s%s", stream ? " for stream: " : "", stream ? stream : "");
		events = xpc_dictionary_create(NULL, NULL, 0);
	} else {
		job_log(j, LOG_DEBUG, "Fetching stream/key: %s/%s", stream, key);
	}

	int result = ESRCH;
	struct externalevent *eei = NULL;
	LIST_FOREACH(eei, &j->events, job_le) {
		if (all_streams) {
			// Group by stream name, creating each sub-dictionary on demand.
			xpc_object_t sub = xpc_dictionary_get_value(events, eei->sys->name);
			if (sub == NULL) {
				sub = xpc_dictionary_create(NULL, NULL, 0);
				xpc_dictionary_set_value(events, eei->sys->name, sub);
				// The parent dictionary retains `sub`; drop our reference.
				xpc_release(sub);
			}
			xpc_dictionary_set_value(sub, eei->name, eei->event);
		} else if (strcmp(eei->sys->name, stream) == 0) {
			if (all_events) {
				xpc_dictionary_set_value(events, eei->name, eei->event);
			} else if (strcmp(eei->name, key) == 0) {
				job_log(j, LOG_DEBUG, "Found event.");
				// Single-event case: the event itself becomes the payload.
				events = xpc_retain(eei->event);
				break;
			}
		}
	}

	if (events) {
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_EVENT, events);
		xpc_release(events);

		*reply = reply2;
		result = 0;
	}

	return result;
}
10736
10737
int
10738
xpc_event_channel_check_in(job_t j, xpc_object_t request, xpc_object_t *reply)
10739
{
10740
const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10741
if (!stream) {
10742
return EXINVAL;
10743
}
10744
10745
job_log(j, LOG_DEBUG, "Checking in stream: %s", stream);
10746
10747
struct machservice *ms = NULL;
10748
int error = xpc_event_find_channel(j, stream, &ms);
10749
if (error) {
10750
job_log(j, LOG_ERR, "Failed to check in: 0x%x: %s", error, xpc_strerror(error));
10751
} else if (ms->isActive) {
10752
job_log(j, LOG_ERR, "Attempt to check in on event channel multiple times: %s", stream);
10753
error = EBUSY;
10754
} else {
10755
machservice_request_notifications(ms);
10756
10757
xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10758
xpc_dictionary_set_mach_recv(reply2, XPC_EVENT_ROUTINE_KEY_PORT, ms->port);
10759
*reply = reply2;
10760
error = 0;
10761
}
10762
10763
return error;
10764
}
10765
10766
int
10767
xpc_event_channel_look_up(job_t j, xpc_object_t request, xpc_object_t *reply)
10768
{
10769
if (!j->event_monitor) {
10770
return EPERM;
10771
}
10772
10773
const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
10774
if (!stream) {
10775
return EXINVAL;
10776
}
10777
10778
uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
10779
if (!token) {
10780
return EXINVAL;
10781
}
10782
10783
job_log(j, LOG_DEBUG, "Looking up channel for stream/token: %s/%llu", stream, token);
10784
10785
struct externalevent *ee = externalevent_find(stream, token);
10786
if (!ee) {
10787
return ESRCH;
10788
}
10789
10790
struct machservice *ms = NULL;
10791
int error = xpc_event_find_channel(ee->job, stream, &ms);
10792
if (!error) {
10793
job_log(j, LOG_DEBUG, "Found event channel port: 0x%x", ms->port);
10794
xpc_object_t reply2 = xpc_dictionary_create_reply(request);
10795
xpc_dictionary_set_mach_send(reply2, XPC_EVENT_ROUTINE_KEY_PORT, ms->port);
10796
*reply = reply2;
10797
error = 0;
10798
} else {
10799
job_log(j, LOG_ERR, "Could not find event channel for stream/token: %s/%llu: 0x%x: %s", stream, token, error, xpc_strerror(error));
10800
}
10801
10802
return error;
10803
}
10804
10805
/* XPC event routine: a provider (the event monitor) checks in for a
 * stream, receiving an array of all currently-registered events on it as
 * alternating (id, event) entries. Creates the stream if it does not
 * exist yet (in which case the array is empty).
 *
 * Returns 0 with *reply set, EPERM for non-monitor callers, EXINVAL on a
 * malformed request, or EXNOMEM if the stream could not be created.
 */
int
xpc_event_provider_check_in(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	if (!j->event_monitor) {
		return EPERM;
	}

	/* This indicates that the event monitor is now safe to signal. This state
	 * is independent of whether this operation actually succeeds; we just need
	 * it to ignore SIGUSR1.
	 */
	j->event_monitor_ready2signal = true;

	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	job_log(j, LOG_DEBUG, "Provider checking in for stream: %s", stream);

	xpc_object_t events = xpc_array_create(NULL, 0);
	struct eventsystem *es = eventsystem_find(stream);
	if (!es) {
		/* If we had to create the event stream, there were no events, so just
		 * give back the empty array.
		 */
		job_log(j, LOG_DEBUG, "Creating event stream.");
		es = eventsystem_new(stream);
		if (!job_assumes(j, es)) {
			// Stream creation failed: release the array we won't be sending.
			xpc_release(events);
			return EXNOMEM;
		}

		// Remember the helper stream so launchd can route support events to it.
		if (strcmp(stream, "com.apple.launchd.helper") == 0) {
			_launchd_support_system = es;
		}
	} else {
		job_log(j, LOG_DEBUG, "Filling event array.");

		// Entries are appended pairwise: numeric id, then the event payload.
		struct externalevent *ei = NULL;
		LIST_FOREACH(ei, &es->events, sys_le) {
			xpc_array_set_uint64(events, XPC_ARRAY_APPEND, ei->id);
			xpc_array_append_value(events, ei->event);
		}
	}

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	xpc_dictionary_set_value(reply2, XPC_EVENT_ROUTINE_KEY_EVENTS, events);
	xpc_release(events);
	*reply = reply2;

	return 0;
}
10858
10859
/* XPC event routine: a provider (the event monitor) reports the new
 * true/false state of the event identified by (stream, token); the owning
 * job is then re-dispatched so the state change can take effect.
 *
 * Returns 0 with *reply set, EPERM for non-monitor callers, EXINVAL on a
 * malformed request, or ESRCH when the event cannot be found.
 */
int
xpc_event_provider_set_state(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	job_t other_j = NULL;

	if (!j->event_monitor) {
		return EPERM;
	}

	const char *stream = xpc_dictionary_get_string(request, XPC_EVENT_ROUTINE_KEY_STREAM);
	if (!stream) {
		return EXINVAL;
	}

	uint64_t token = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_TOKEN);
	if (!token) {
		return EXINVAL;
	}

	// The state must be an explicit XPC boolean; anything else is malformed.
	bool state = false;
	xpc_object_t xstate = xpc_dictionary_get_value(request, XPC_EVENT_ROUTINE_KEY_STATE);
	if (!xstate || xpc_get_type(xstate) != XPC_TYPE_BOOL) {
		return EXINVAL;
	} else {
		state = xpc_bool_get_value(xstate);
	}

	job_log(j, LOG_DEBUG, "Setting event state to %s for stream/token: %s/%llu", state ? "true" : "false", stream, token);

	struct externalevent *ei = externalevent_find(stream, token);
	if (!ei) {
		job_log(j, LOG_ERR, "Could not find stream/token: %s/%llu", stream, token);
		return ESRCH;
	}

	// Capture the owning job before the event can be deleted below.
	other_j = ei->job;
	ei->state = state;

	if (ei->internal) {
		// Internal events gate exec(2); once satisfied, the event is one-shot
		// and is removed. `ei` is invalid after externalevent_delete().
		job_log(ei->job, LOG_NOTICE, "Job should be able to exec(3) now.");
		ei->job->waiting4ok = false;
		externalevent_delete(ei);
	}

	(void)job_dispatch(other_j, false);

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	*reply = reply2;

	return 0;
}
10910
10911
/* Demultiplexer for XPC Events requests arriving on port `p`.
 *
 * Returns false only when the request carries no opcode (so the caller can
 * try another demuxer); otherwise always returns true, with any error
 * reported back to the sender in the reply dictionary.
 */
bool
xpc_event_demux(mach_port_t p, xpc_object_t request, xpc_object_t *reply)
{
	uint64_t op = xpc_dictionary_get_uint64(request, XPC_EVENT_ROUTINE_KEY_OP);
	if (!op) {
		return false;
	}

	// Record the caller's audit credentials for later permission checks.
	audit_token_t token;
	xpc_dictionary_get_audit_token(request, &token);
	runtime_record_caller_creds(&token);

	struct ldcred *ldc = runtime_get_caller_creds();
	job_t j = managed_job(ldc->pid);
	if (!j) {
		j = job_mig_intran(p);
		if (!j) {
			// No resolvable job: force the switch below into the EINVAL arm.
			// (op is uint64_t, so -1 wraps to the same value as `case -1:`.)
			op = -1;
		}
	}

	job_log(j, LOG_DEBUG, "Incoming XPC event request: %llu", op);

	int error = -1;
	switch (op) {
	case XPC_EVENT_GET_NAME:
		error = xpc_event_get_event_name(j, request, reply);
		break;
	case XPC_EVENT_SET:
		error = xpc_event_set_event(j, request, reply);
		break;
	case XPC_EVENT_COPY:
		error = xpc_event_copy_event(j, request, reply);
		break;
	case XPC_EVENT_CHECK_IN:
		error = xpc_event_channel_check_in(j, request, reply);
		break;
	case XPC_EVENT_LOOK_UP:
		error = xpc_event_channel_look_up(j, request, reply);
		break;
	case XPC_EVENT_PROVIDER_CHECK_IN:
		error = xpc_event_provider_check_in(j, request, reply);
		break;
	case XPC_EVENT_PROVIDER_SET_STATE:
		error = xpc_event_provider_set_state(j, request, reply);
		break;
	case XPC_EVENT_COPY_ENTITLEMENTS:
		error = xpc_event_copy_entitlements(j, request, reply);
		break;
	case -1:
		error = EINVAL;
		break;
	default:
		job_log(j, LOG_ERR, "Bogus opcode.");
		error = EDOM;
	}

	// On failure, synthesize an error reply so the sender always hears back.
	if (error) {
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		xpc_dictionary_set_uint64(reply2, XPC_EVENT_ROUTINE_KEY_ERROR, error);
		*reply = reply2;
	}

	return true;
}
10976
10977
uint64_t
10978
xpc_get_jetsam_entitlement(const char *key)
10979
{
10980
uint64_t entitlement = 0;
10981
10982
audit_token_t *token = runtime_get_caller_token();
10983
xpc_object_t value = xpc_copy_entitlement_for_token(key, token);
10984
if (value) {
10985
if (xpc_get_type(value) == XPC_TYPE_UINT64) {
10986
entitlement = xpc_uint64_get_value(value);
10987
}
10988
10989
xpc_release(value);
10990
}
10991
10992
return entitlement;
10993
}
10994
10995
/* XPC process routine: set the Jetsam priority band of the job named by
 * the request's label.
 *
 * Permission: allowed for "embedded god" callers, for callers whose
 * jetsam.modify-priority entitlement covers the requested band, or
 * unconditionally when launchd_no_jetsam_perm_check is set.
 *
 * Returns 0 with *reply set, EXINVAL on a malformed request or
 * out-of-range band, EXSRCH if the label is unknown, EINVAL for a NULL
 * job, or EPERM when the caller lacks permission.
 */
int
xpc_process_set_jetsam_band(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	if (!j) {
		return EINVAL;
	}

	const char *label = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_LABEL);
	if (!label) {
		return EXINVAL;
	}

	xpc_jetsam_band_t entitled_band = -1;
	xpc_jetsam_band_t requested_band = (xpc_jetsam_band_t)xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_PRIORITY_BAND);
	if (!requested_band) {
		return EXINVAL;
	}

	// Band must fall within the valid [SUSPENDED, LAST) range.
	if (!(requested_band >= XPC_JETSAM_BAND_SUSPENDED && requested_band < XPC_JETSAM_BAND_LAST)) {
		return EXINVAL;
	}

	uint64_t rcdata = xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_RCDATA);

	job_t tj = job_find(root_jobmgr, label);
	if (!tj) {
		return EXSRCH;
	}

	boolean_t allow = false;
	if (j->embedded_god) {
		allow = true;
	} else {
		// Callers may raise a job only up to the band their entitlement grants.
		entitled_band = xpc_get_jetsam_entitlement("com.apple.private.jetsam.modify-priority");
		if (entitled_band >= requested_band) {
			allow = true;
		}
	}

	if (!allow) {
		if (launchd_no_jetsam_perm_check) {
			// Debug escape hatch: permission checks globally disabled.
			job_log(j, LOG_NOTICE, "Jetsam priority checks disabled; allowing job to set priority: %d", requested_band);
		} else {
			job_log(j, LOG_ERR, "Job cannot decrease Jetsam priority band (requested/maximum): %d/%d", requested_band, entitled_band);
			return EPERM;
		}
	}

	job_log(j, LOG_INFO, "Setting Jetsam band: %d.", requested_band);
	job_update_jetsam_properties(tj, requested_band, rcdata);

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	*reply = reply2;

	return 0;
}
11051
11052
/* XPC process routine: set the Jetsam memory limit of the job named by the
 * request's label.
 *
 * Permission: allowed for "embedded god" callers, for callers whose
 * jetsam.memory_limit entitlement is at least the requested limit, or
 * unconditionally when launchd_no_jetsam_perm_check is set.
 *
 * Returns 0 with *reply set, EXINVAL on a malformed request, EXSRCH if
 * the label is unknown, EINVAL for a NULL job, or EPERM when the caller
 * lacks permission.
 */
int
xpc_process_set_jetsam_memory_limit(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	if (!j) {
		return EINVAL;
	}

	const char *label = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_LABEL);
	if (!label) {
		return EXINVAL;
	}

	int32_t entitlement_limit = 0;
	int32_t requested_limit = (int32_t)xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_MEMORY_LIMIT);

	job_t tj = job_find(root_jobmgr, label);
	if (!tj) {
		return EXSRCH;
	}

	boolean_t allow = false;
	if (j->embedded_god) {
		allow = true;
	} else {
		// The entitlement caps how large a limit the caller may request.
		entitlement_limit = (int32_t)xpc_get_jetsam_entitlement("com.apple.private.jetsam.memory_limit");
		if (entitlement_limit >= requested_limit) {
			allow = true;
		}
	}

	if (!allow) {
		if (launchd_no_jetsam_perm_check) {
			// Debug escape hatch: permission checks globally disabled.
			job_log(j, LOG_NOTICE, "Jetsam priority checks disabled; allowing job to set memory limit: %d", requested_limit);
		} else {
			job_log(j, LOG_ERR, "Job cannot set Jetsam memory limit (requested/maximum): %d/%d", requested_limit, entitlement_limit);
			return EPERM;
		}
	}

	job_log(j, LOG_INFO, "Setting Jetsam memory limit: %d.", requested_limit);
	job_update_jetsam_memory_limit(tj, requested_limit);

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	*reply = reply2;

	return 0;
}
11099
11100
static jobmgr_t
11101
_xpc_process_find_target_manager(job_t j, xpc_service_type_t type, pid_t pid)
11102
{
11103
jobmgr_t target = NULL;
11104
if (type == XPC_SERVICE_TYPE_BUNDLED) {
11105
job_log(j, LOG_DEBUG, "Bundled service. Searching for XPC domains for PID: %d", pid);
11106
11107
jobmgr_t jmi = NULL;
11108
SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
11109
if (jmi->req_pid && jmi->req_pid == pid) {
11110
jobmgr_log(jmi, LOG_DEBUG, "Found job manager for PID.");
11111
target = jmi;
11112
break;
11113
}
11114
}
11115
} else if (type == XPC_SERVICE_TYPE_LAUNCHD || type == XPC_SERVICE_TYPE_APP) {
11116
target = j->mgr;
11117
}
11118
11119
return target;
11120
}
11121
11122
/* XPC process routine: attach the (entitled) caller to a service so it is
 * notified when a new instance of that service starts. Requires the
 * boolean XPC_SERVICE_ENTITLEMENT_ATTACH entitlement.
 *
 * The reply carries the PID of an already-running instance when one
 * exists, or an embedded ESRCH error when the attachment was recorded but
 * no instance is running yet. Returns 0 with *reply set, EINVAL/EXINVAL
 * on malformed requests, or EPERM on entitlement failure.
 */
static int
xpc_process_attach(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	if (!j) {
		return EINVAL;
	}

	audit_token_t *token = runtime_get_caller_token();
	xpc_object_t entitlement = xpc_copy_entitlement_for_token(XPC_SERVICE_ENTITLEMENT_ATTACH, token);
	if (!entitlement) {
		job_log(j, LOG_ERR, "Job does not have entitlement: %s", XPC_SERVICE_ENTITLEMENT_ATTACH);
		return EPERM;
	}

	// The entitlement must be the boolean true, not merely present.
	if (entitlement != XPC_BOOL_TRUE) {
		char *desc = xpc_copy_description(entitlement);
		job_log(j, LOG_ERR, "Job has bad value for entitlement: %s:\n%s", XPC_SERVICE_ENTITLEMENT_ATTACH, desc);
		free(desc);

		xpc_release(entitlement);
		return EPERM;
	}

	const char *name = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_NAME);
	if (!name) {
		return EXINVAL;
	}

	xpc_service_type_t type = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_TYPE);
	if (!type) {
		return EXINVAL;
	}

	// Port on which the caller will be told about new instances.
	mach_port_t port = xpc_dictionary_copy_mach_send(request, XPC_PROCESS_ROUTINE_KEY_NEW_INSTANCE_PORT);
	if (!MACH_PORT_VALID(port)) {
		return EXINVAL;
	}

	pid_t pid = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_HANDLE);

	job_log(j, LOG_DEBUG, "Attaching to service: %s", name);

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	jobmgr_t target = _xpc_process_find_target_manager(j, type, pid);
	if (target) {
		jobmgr_log(target, LOG_DEBUG, "Found target job manager for service: %s", name);
		(void)jobmgr_assumes(target, waiting4attach_new(target, name, port, 0, type));

		/* HACK: This is awful. For legacy reasons, launchd job labels are all
		 * stored in a global namespace, which is stored in the root job
		 * manager. But XPC domains have a per-domain namespace. So if we're
		 * looking for a legacy launchd job, we have to redirect any attachment
		 * attempts to the root job manager to find existing instances.
		 *
		 * But because we store attachments on a per-job manager basis, we have
		 * to create the new attachment in the actual target job manager, hence
		 * why we change the target only after we've created the attachment.
		 */
		if (strcmp(target->name, VPROCMGR_SESSION_AQUA) == 0) {
			target = root_jobmgr;
		}

		job_t existing = job_find(target, name);
		if (existing && existing->p) {
			job_log(existing, LOG_DEBUG, "Found existing instance of service.");
			xpc_dictionary_set_int64(reply2, XPC_PROCESS_ROUTINE_KEY_PID, existing->p);
		} else {
			// Attachment recorded, but nothing is running yet.
			xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, ESRCH);
		}
	} else if (type == XPC_SERVICE_TYPE_BUNDLED) {
		// No domain for this PID yet: record a pending (domain-less) attach
		// keyed by pid; it is migrated into the domain when it appears.
		// NOTE(review): `target` is NULL here by construction — presumably
		// waiting4attach_new() handles a NULL manager for this case; confirm.
		(void)job_assumes(j, waiting4attach_new(target, name, port, pid, type));
		xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, ESRCH);
	} else {
		xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, EXSRCH);
	}

	*reply = reply2;
	return 0;
}
11201
11202
static int
11203
xpc_process_detach(job_t j, xpc_object_t request, xpc_object_t *reply __unused)
11204
{
11205
if (!j) {
11206
return EINVAL;
11207
}
11208
11209
const char *name = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_NAME);
11210
if (!name) {
11211
return EXINVAL;
11212
}
11213
11214
xpc_service_type_t type = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_TYPE);
11215
if (!type) {
11216
return EXINVAL;
11217
}
11218
11219
job_log(j, LOG_DEBUG, "Deatching from service: %s", name);
11220
11221
pid_t pid = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_PID);
11222
jobmgr_t target = _xpc_process_find_target_manager(j, type, pid);
11223
if (target) {
11224
jobmgr_log(target, LOG_DEBUG, "Found target job manager for service: %s", name);
11225
11226
struct waiting4attach *w4ai = NULL;
11227
struct waiting4attach *w4ait = NULL;
11228
LIST_FOREACH_SAFE(w4ai, &target->attaches, le, w4ait) {
11229
if (strcmp(name, w4ai->name) == 0) {
11230
jobmgr_log(target, LOG_DEBUG, "Found attachment. Deleting.");
11231
waiting4attach_delete(target, w4ai);
11232
break;
11233
}
11234
}
11235
}
11236
11237
return 0;
11238
}
11239
11240
/* XPC process routine: reply with the attachment properties recorded for
 * the calling service instance — service type, the new-instance port, the
 * executable path, and (when present) the argument vector.
 *
 * Returns 0 with *reply set, or EXINVAL when the caller cannot be resolved
 * to a managed job or has no pending attachment.
 */
static int
xpc_process_get_properties(job_t j, xpc_object_t request, xpc_object_t *reply)
{
	if (j->anonymous) {
		/* Total hack. libxpc will send requests to the pipe created out of the
		 * process' bootstrap port, so when job_mig_intran() tries to resolve
		 * the process into a job, it'll wind up creating an anonymous job if
		 * the requestor was an XPC service, whose job manager is an XPC domain.
		 */
		pid_t pid = j->p;
		jobmgr_t jmi = NULL;
		SLIST_FOREACH(jmi, &root_jobmgr->submgrs, sle) {
			if ((j = jobmgr_find_by_pid(jmi, pid, false))) {
				break;
			}
		}
	}

	// Either the search above exhausted all submanagers (j may now be NULL)
	// or it still resolved to an anonymous job: nothing to report.
	if (!j || j->anonymous) {
		return EXINVAL;
	}

	struct waiting4attach *w4a = waiting4attach_find(j->mgr, j);
	if (!w4a) {
		return EXINVAL;
	}

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_TYPE, w4a->type);
	xpc_dictionary_set_mach_send(reply2, XPC_PROCESS_ROUTINE_KEY_NEW_INSTANCE_PORT, w4a->port);
	if (j->prog) {
		xpc_dictionary_set_string(reply2, XPC_PROCESS_ROUTINE_KEY_PATH, j->prog);
	} else {
		// NOTE(review): assumes a job without `prog` always has a non-NULL
		// argv (the argv NULL-check below comes after this read) — confirm
		// against the job-creation invariants elsewhere in this file.
		xpc_dictionary_set_string(reply2, XPC_PROCESS_ROUTINE_KEY_PATH, j->argv[0]);
	}

	if (j->argv) {
		xpc_object_t xargv = xpc_array_create(NULL, 0);

		// Copy argv, skipping any NULL slots.
		size_t i = 0;
		for (i = 0; i < j->argc; i++) {
			if (j->argv[i]) {
				xpc_array_set_string(xargv, XPC_ARRAY_APPEND, j->argv[i]);
			}
		}

		xpc_dictionary_set_value(reply2, XPC_PROCESS_ROUTINE_KEY_ARGV, xargv);
		xpc_release(xargv);
	}

	*reply = reply2;
	return 0;
}
11293
11294
/* XPC process routine: send signal `whichsig` to the named service in the
 * caller's XPC domain. Only private (non-alias) instances owned by the
 * caller's euid may be killed. Compiled to ENOTSUP on SDKs older than
 * XPC_LPI_VERSION 20130426.
 *
 * Returns 0 with *reply set (the kill(2) result is embedded in the reply's
 * ERROR key), or ESRCH/ENOENT/EINVAL/EPERM/EALREADY on the various
 * precondition failures.
 */
static int
xpc_process_service_kill(job_t j, xpc_object_t request, xpc_object_t *reply)
{
#if XPC_LPI_VERSION >= 20130426
	if (!j) {
		return ESRCH;
	}

	// Resolve the caller's own XPC domain; kills are domain-scoped.
	jobmgr_t jm = _xpc_process_find_target_manager(j, XPC_SERVICE_TYPE_BUNDLED, j->p);
	if (!jm) {
		return ENOENT;
	}

	const char *name = xpc_dictionary_get_string(request, XPC_PROCESS_ROUTINE_KEY_NAME);
	if (!name) {
		return EINVAL;
	}

	int64_t whichsig = xpc_dictionary_get_int64(request, XPC_PROCESS_ROUTINE_KEY_SIGNAL);
	if (!whichsig) {
		return EINVAL;
	}

	job_t j2kill = job_find(jm, name);
	if (!j2kill) {
		return ESRCH;
	}

	if (j2kill->alias) {
		// Only allow for private instances to be killed.
		return EPERM;
	}

	// Fetch the target's BSD info so we can compare process owners.
	struct proc_bsdshortinfo proc;
	if (proc_pidinfo(j2kill->p, PROC_PIDT_SHORTBSDINFO, 1, &proc, PROC_PIDT_SHORTBSDINFO_SIZE) == 0) {
		if (errno != ESRCH) {
			(void)jobmgr_assumes_zero(root_jobmgr, errno);
		}

		return errno;
	}

	struct ldcred *ldc = runtime_get_caller_creds();
	if (proc.pbsi_uid != ldc->euid) {
		// Do not allow non-root to kill RoleAccount services running as a
		// different user.
		return EPERM;
	}

	if (!j2kill->p) {
		return EALREADY;
	}

	xpc_object_t reply2 = xpc_dictionary_create_reply(request);
	if (!reply2) {
		return EINVAL;
	}

	// The kill(2) outcome is reported in-band; this routine itself succeeds.
	int error = 0;
	int ret = kill(j2kill->p, whichsig);
	if (ret) {
		error = errno;
	}

	xpc_dictionary_set_int64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, error);
	*reply = reply2;
	return 0;
#else
	return ENOTSUP;
#endif
}
11365
11366
/* Demultiplexer for XPC process-management requests arriving on port `p`.
 *
 * Returns false only when the request carries no opcode (so the caller can
 * try another demuxer); otherwise always returns true, with any error
 * reported back to the sender in the reply dictionary.
 */
bool
xpc_process_demux(mach_port_t p, xpc_object_t request, xpc_object_t *reply)
{
	uint64_t op = xpc_dictionary_get_uint64(request, XPC_PROCESS_ROUTINE_KEY_OP);
	if (!op) {
		return false;
	}

	// Record the caller's audit credentials for later permission checks.
	audit_token_t token;
	xpc_dictionary_get_audit_token(request, &token);
	runtime_record_caller_creds(&token);

	job_t j = job_mig_intran(p);
	job_log(j, LOG_DEBUG, "Incoming XPC process request: %llu", op);

	int error = -1;
	switch (op) {
	case XPC_PROCESS_JETSAM_SET_BAND:
		error = xpc_process_set_jetsam_band(j, request, reply);
		break;
	case XPC_PROCESS_JETSAM_SET_MEMORY_LIMIT:
		error = xpc_process_set_jetsam_memory_limit(j, request, reply);
		break;
	case XPC_PROCESS_SERVICE_ATTACH:
		error = xpc_process_attach(j, request, reply);
		break;
	case XPC_PROCESS_SERVICE_DETACH:
		error = xpc_process_detach(j, request, reply);
		break;
	case XPC_PROCESS_SERVICE_GET_PROPERTIES:
		error = xpc_process_get_properties(j, request, reply);
		break;
	case XPC_PROCESS_SERVICE_KILL:
		error = xpc_process_service_kill(j, request, reply);
		break;
	default:
		job_log(j, LOG_ERR, "Bogus process opcode.");
		error = EDOM;
	}

	// On failure, synthesize an error reply so the sender always hears back.
	if (error) {
		xpc_object_t reply2 = xpc_dictionary_create_reply(request);
		if (reply2) {
			xpc_dictionary_set_uint64(reply2, XPC_PROCESS_ROUTINE_KEY_ERROR, error);
		}

		*reply = reply2;
	}

	return true;
}
11417
11418
kern_return_t
11419
job_mig_kickstart(job_t j, name_t targetlabel, pid_t *out_pid, unsigned int flags)
11420
{
11421
struct ldcred *ldc = runtime_get_caller_creds();
11422
job_t otherj;
11423
11424
if (!j) {
11425
return BOOTSTRAP_NO_MEMORY;
11426
}
11427
11428
if (unlikely(!(otherj = job_find(NULL, targetlabel)))) {
11429
return BOOTSTRAP_UNKNOWN_SERVICE;
11430
}
11431
11432
#if TARGET_OS_EMBEDDED
11433
bool allow_non_root_kickstart = j->username && otherj->username && (strcmp(j->username, otherj->username) == 0);
11434
#else
11435
bool allow_non_root_kickstart = false;
11436
#endif
11437
11438
if (ldc->euid != 0 && ldc->euid != geteuid() && !allow_non_root_kickstart) {
11439
return BOOTSTRAP_NOT_PRIVILEGED;
11440
}
11441
11442
#if HAVE_SANDBOX
11443
if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
11444
return BOOTSTRAP_NOT_PRIVILEGED;
11445
}
11446
#endif
11447
11448
if (otherj->p && (flags & VPROCFLAG_STALL_JOB_EXEC)) {
11449
return BOOTSTRAP_SERVICE_ACTIVE;
11450
}
11451
11452
otherj->stall_before_exec = (flags & VPROCFLAG_STALL_JOB_EXEC);
11453
otherj = job_dispatch(otherj, true);
11454
11455
if (!job_assumes(j, otherj && otherj->p)) {
11456
// <rdar://problem/6787083> Clear this flag if we failed to start the job.
11457
otherj->stall_before_exec = false;
11458
return BOOTSTRAP_NO_MEMORY;
11459
}
11460
11461
*out_pid = otherj->p;
11462
11463
return 0;
11464
}
11465
11466
/* MIG helper: unpack a serialized job dictionary sent by a client and import
 * it as a new "app"/legacy-LaunchServices job, dispatching it immediately.
 * On success the new job is returned through outj. Returns
 * BOOTSTRAP_NAME_IN_USE (with outj set to the existing job) when a job with
 * the same label already exists.
 */
kern_return_t
job_mig_spawn_internal(job_t j, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, job_t *outj)
{
	launch_data_t jobdata = NULL;
	size_t data_offset = 0;
	struct ldcred *ldc = runtime_get_caller_creds();
	job_t jr;

	if (!j) {
		return BOOTSTRAP_NO_MEMORY;
	}

	if (unlikely(j->deny_job_creation)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}

#if HAVE_SANDBOX
	if (unlikely(sandbox_check(ldc->pid, "job-creation", SANDBOX_FILTER_NONE) > 0)) {
		return BOOTSTRAP_NOT_PRIVILEGED;
	}
#endif

	// PID 1 punts non-root requests to the caller's per-user launchd.
	if (unlikely(pid1_magic && ldc->euid && ldc->uid)) {
		job_log(j, LOG_DEBUG, "Punting spawn to per-user-context");
		return VPROC_ERR_TRY_PER_USER;
	}

	if (!job_assumes(j, indataCnt != 0)) {
		return 1;
	}

	runtime_ktrace0(RTKT_LAUNCHD_DATA_UNPACK);
	if (!job_assumes(j, (jobdata = launch_data_unpack((void *)indata, indataCnt, NULL, 0, &data_offset, NULL)) != NULL)) {
		return 1;
	}

	// NULL name selects the Aqua session's job manager in the caller's context.
	jobmgr_t target_jm = jobmgr_find_by_name(j->mgr, NULL);
	if (!jobmgr_assumes(j->mgr, target_jm != NULL)) {
		jobmgr_log(j->mgr, LOG_ERR, "This API can only be used by a process running within an Aqua session.");
		return 1;
	}

	jr = jobmgr_import2(target_jm ?: j->mgr, jobdata);

	launch_data_t label = NULL;
	launch_data_t wait4debugger = NULL;
	if (!jr) {
		// jobmgr_import2() communicates the failure cause through errno.
		switch (errno) {
		case EEXIST:
			/* If EEXIST was returned, we know that there is a label string in
			 * the dictionary. So we don't need to check the types here; that
			 * has already been done.
			 */
			label = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_LABEL);
			jr = job_find(NULL, launch_data_get_string(label));
			// NOTE(review): if job_find() unexpectedly returns NULL here, we
			// still store NULL into *outj and return NAME_IN_USE — callers
			// (e.g. job_mig_spawn2) appear to dereference that result; verify.
			if (job_assumes(j, jr != NULL) && !jr->p) {
				wait4debugger = launch_data_dict_lookup(jobdata, LAUNCH_JOBKEY_WAITFORDEBUGGER);
				if (wait4debugger && launch_data_get_type(wait4debugger) == LAUNCH_DATA_BOOL) {
					if (launch_data_get_bool(wait4debugger)) {
						/* If the job exists, we're going to kick-start it, but
						 * we need to give the caller the opportunity to start
						 * it suspended if it so desires. But this will only
						 * take effect if the job isn't running.
						 */
						jr->wait4debugger_oneshot = true;
					}
				}
			}

			*outj = jr;
			return BOOTSTRAP_NAME_IN_USE;
		default:
			return BOOTSTRAP_NO_MEMORY;
		}
	}

	if (pid1_magic) {
		jr->mach_uid = ldc->uid;
	}

	// TODO: Consolidate the app and legacy_LS_job bits.
	jr->legacy_LS_job = true;
	jr->abandon_pg = true;
	jr->asport = asport;
	jr->app = true;
	uuid_clear(jr->expected_audit_uuid);
	jr = job_dispatch(jr, true);

	// NOTE(review): if job_dispatch() returned NULL, job_remove(jr) below is
	// called with NULL — confirm job_remove() tolerates a NULL job.
	if (!job_assumes(j, jr != NULL)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	if (!job_assumes(jr, jr->p)) {
		job_remove(jr);
		return BOOTSTRAP_NO_MEMORY;
	}

	job_log(jr, LOG_DEBUG, "Spawned by PID %u: %s", j->p, j->label);
	*outj = jr;

	return BOOTSTRAP_SUCCESS;
}
11569
11570
/* MIG routine: spawn a job from serialized data and hold the reply until the
 * child has called exec(3), so the caller can safely SIGCONT it. Returns
 * MIG_NO_REPLY when the reply is deferred (the exit-port machinery answers
 * later via the stashed spawn_reply_port).
 */
kern_return_t
job_mig_spawn2(job_t j, mach_port_t rp, vm_offset_t indata, mach_msg_type_number_t indataCnt, mach_port_t asport, pid_t *child_pid, mach_port_t *obsvr_port)
{
	job_t nj = NULL;
	kern_return_t kr = job_mig_spawn_internal(j, indata, indataCnt, asport, &nj);
	if (likely(kr == KERN_SUCCESS)) {
		if (job_setup_exit_port(nj) != KERN_SUCCESS) {
			job_remove(nj);
			kr = BOOTSTRAP_NO_MEMORY;
		} else {
			/* Do not return until the job has called exec(3), thereby making it
			 * safe for the caller to send it SIGCONT.
			 *
			 * <rdar://problem/9042798>
			 */
			nj->spawn_reply_port = rp;
			kr = MIG_NO_REPLY;
		}
	} else if (kr == BOOTSTRAP_NAME_IN_USE) {
		// NOTE(review): job_mig_spawn_internal() can in principle hand back a
		// NULL nj together with BOOTSTRAP_NAME_IN_USE (when its job_find()
		// assertion fails); nj->p below would then crash — verify upstream.
		bool was_running = nj->p;
		if (job_dispatch(nj, true)) {
			if (!was_running) {
				job_log(nj, LOG_DEBUG, "Job exists but is not running. Kick-starting.");

				// Defer the reply just like the fresh-spawn path above.
				if (job_setup_exit_port(nj) == KERN_SUCCESS) {
					nj->spawn_reply_port = rp;
					kr = MIG_NO_REPLY;
				} else {
					kr = BOOTSTRAP_NO_MEMORY;
				}
			} else {
				// Already running: answer immediately with its PID.
				*obsvr_port = MACH_PORT_NULL;
				*child_pid = nj->p;
				kr = KERN_SUCCESS;
			}
		} else {
			job_log(nj, LOG_ERR, "Failed to dispatch job, requestor: %s", j->label);
			kr = BOOTSTRAP_UNKNOWN_SERVICE;
		}
	}

	// The MIG-allocated request buffer is consumed on every path.
	mig_deallocate(indata, indataCnt);
	return kr;
}
11614
11615
launch_data_t
11616
job_do_legacy_ipc_request(job_t j, launch_data_t request, mach_port_t asport __attribute__((unused)))
11617
{
11618
launch_data_t reply = NULL;
11619
11620
errno = ENOTSUP;
11621
if (launch_data_get_type(request) == LAUNCH_DATA_STRING) {
11622
if (strcmp(launch_data_get_string(request), LAUNCH_KEY_CHECKIN) == 0) {
11623
reply = job_export(j);
11624
job_checkin(j);
11625
}
11626
}
11627
11628
return reply;
11629
}
11630
11631
// Cap on the number of file descriptors a single legacy IPC message may carry.
#define LAUNCHD_MAX_LEGACY_FDS 128
// Number of elements in a fixed-size array.
#define countof(x) (sizeof((x)) / sizeof((x[0])))
11633
11634
kern_return_t
11635
job_mig_legacy_ipc_request(job_t j, vm_offset_t request,
11636
mach_msg_type_number_t requestCnt, mach_port_array_t request_fds,
11637
mach_msg_type_number_t request_fdsCnt, vm_offset_t *reply,
11638
mach_msg_type_number_t *replyCnt, mach_port_array_t *reply_fdps,
11639
mach_msg_type_number_t *reply_fdsCnt, mach_port_t asport)
11640
{
11641
if (!j) {
11642
return BOOTSTRAP_NO_MEMORY;
11643
}
11644
11645
/* TODO: Once we support actions other than checking in, we must check the
11646
* sandbox capabilities and EUID of the requestort.
11647
*/
11648
size_t nout_fdps = 0;
11649
size_t nfds = request_fdsCnt / sizeof(request_fds[0]);
11650
if (nfds > LAUNCHD_MAX_LEGACY_FDS) {
11651
job_log(j, LOG_ERR, "Too many incoming descriptors: %lu", nfds);
11652
return BOOTSTRAP_NO_MEMORY;
11653
}
11654
11655
int in_fds[LAUNCHD_MAX_LEGACY_FDS];
11656
size_t i = 0;
11657
for (i = 0; i < nfds; i++) {
11658
in_fds[i] = fileport_makefd(request_fds[i]);
11659
if (in_fds[i] == -1) {
11660
job_log(j, LOG_ERR, "Bad descriptor passed in legacy IPC request at index: %lu", i);
11661
}
11662
}
11663
11664
// DON'T goto outbad before this point.
11665
*reply = 0;
11666
*reply_fdps = NULL;
11667
launch_data_t ldreply = NULL;
11668
11669
size_t dataoff = 0;
11670
size_t fdoff = 0;
11671
launch_data_t ldrequest = launch_data_unpack((void *)request, requestCnt, in_fds, nfds, &dataoff, &fdoff);
11672
if (!ldrequest) {
11673
job_log(j, LOG_ERR, "Invalid legacy IPC request passed.");
11674
goto out_bad;
11675
}
11676
11677
ldreply = job_do_legacy_ipc_request(j, ldrequest, asport);
11678
if (!ldreply) {
11679
ldreply = launch_data_new_errno(errno);
11680
if (!ldreply) {
11681
goto out_bad;
11682
}
11683
}
11684
11685
*replyCnt = 10 * 1024 * 1024;
11686
mig_allocate(reply, *replyCnt);
11687
if (!*reply) {
11688
goto out_bad;
11689
}
11690
11691
int out_fds[LAUNCHD_MAX_LEGACY_FDS];
11692
size_t nout_fds = 0;
11693
size_t sz = launch_data_pack(ldreply, (void *)*reply, *replyCnt, out_fds, &nout_fds);
11694
if (!sz) {
11695
job_log(j, LOG_ERR, "Could not pack legacy IPC reply.");
11696
goto out_bad;
11697
}
11698
11699
if (nout_fds) {
11700
if (nout_fds > 128) {
11701
job_log(j, LOG_ERR, "Too many outgoing descriptors: %lu", nout_fds);
11702
goto out_bad;
11703
}
11704
11705
*reply_fdsCnt = nout_fds * sizeof((*reply_fdps)[0]);
11706
mig_allocate((vm_address_t *)reply_fdps, *reply_fdsCnt);
11707
if (!*reply_fdps) {
11708
goto out_bad;
11709
}
11710
11711
for (i = 0; i < nout_fds; i++) {
11712
mach_port_t fp = MACH_PORT_NULL;
11713
/* Whatever. Worst case is that we insert MACH_PORT_NULL. Not a big
11714
* deal. Note, these get stuffed into an array whose disposition is
11715
* mach_port_move_send_t, so we don't have to worry about them after
11716
* returning.
11717
*/
11718
if (fileport_makeport(out_fds[i], &fp) != 0) {
11719
job_log(j, LOG_ERR, "Could not pack response descriptor at index: %lu: %d: %s", i, errno, strerror(errno));
11720
}
11721
(*reply_fdps)[i] = fp;
11722
}
11723
11724
nout_fdps = nout_fds;
11725
} else {
11726
*reply_fdsCnt = 0;
11727
}
11728
11729
mig_deallocate(request, requestCnt);
11730
launch_data_free(ldreply);
11731
ldreply = NULL;
11732
11733
// Unused for now.
11734
(void)launchd_mport_deallocate(asport);
11735
11736
return BOOTSTRAP_SUCCESS;
11737
11738
out_bad:
11739
for (i = 0; i < nfds; i++) {
11740
(void)close(in_fds[i]);
11741
}
11742
11743
for (i = 0; i < nout_fds; i++) {
11744
(void)launchd_mport_deallocate((*reply_fdps)[i]);
11745
}
11746
11747
if (*reply) {
11748
mig_deallocate(*reply, *replyCnt);
11749
}
11750
11751
/* We should never hit this since the last goto out is in the case that
11752
* allocating this fails.
11753
*/
11754
if (*reply_fdps) {
11755
mig_deallocate((vm_address_t)*reply_fdps, *reply_fdsCnt);
11756
}
11757
11758
if (ldreply) {
11759
launch_data_free(ldreply);
11760
}
11761
11762
return BOOTSTRAP_NO_MEMORY;
11763
}
11764
11765
/* Bootstrap the root job-manager hierarchy and the XPC "system" domain.
 * sflag carries the single-user-mode flag down into the root manager.
 */
void
jobmgr_init(bool sflag)
{
	const char *root_session_type = pid1_magic ? VPROCMGR_SESSION_SYSTEM : VPROCMGR_SESSION_BACKGROUND;
	SLIST_INIT(&s_curious_jobs);
	LIST_INIT(&s_needing_sessions);

	// Failure to create either the root manager or the XPC system domain is fatal.
	os_assert((root_jobmgr = jobmgr_new(NULL, MACH_PORT_NULL, MACH_PORT_NULL, sflag, root_session_type, false, MACH_PORT_NULL)) != NULL);
	os_assert((_s_xpc_system_domain = jobmgr_new_xpc_singleton_domain(root_jobmgr, "com.apple.xpc.system")) != NULL);
	_s_xpc_system_domain->req_asid = launchd_audit_session;
	_s_xpc_system_domain->req_asport = launchd_audit_port;
	_s_xpc_system_domain->shortdesc = "system";
	if (pid1_magic) {
		root_jobmgr->monitor_shutdown = true;
	}

	uint32_t fflags = NOTE_ATTRIB | NOTE_LINK | NOTE_REVOKE | NOTE_EXTEND | NOTE_WRITE;
	/* Hold a descriptor on /dev — presumably so later opens don't hang on
	 * autofs (the variable name and the _nowait device suggest this; confirm).
	 * /dev/autofs_nowait is expected to be absent on most systems, hence the
	 * deliberate likely() on the failure branch below.
	 */
	s_no_hang_fd = open("/dev/autofs_nowait", O_EVTONLY | O_NONBLOCK);
	if (likely(s_no_hang_fd == -1)) {
		if (jobmgr_assumes_zero_p(root_jobmgr, (s_no_hang_fd = open("/dev", O_EVTONLY | O_NONBLOCK))) != -1) {
			// Watch /dev for changes via kqueue, keyed to the root manager.
			(void)jobmgr_assumes_zero_p(root_jobmgr, kevent_mod((uintptr_t)s_no_hang_fd, EVFILT_VNODE, EV_ADD, fflags, 0, root_jobmgr));
		}
	}
	s_no_hang_fd = _fd(s_no_hang_fd);
}
11790
11791
size_t
our_strhash(const char *s)
{
	/* djb2 string hash, first reported by Dan Bernstein on comp.lang.c:
	 * hash(i) = hash(i - 1) * 33 + c, seeded with 5381.
	 */
	size_t hash = 5381;
	size_t ch;

	for (ch = *s; ch != 0; ch = *++s) {
		hash = (hash << 5) + hash + ch; // hash * 33 + ch
	}

	return hash;
}
11806
11807
size_t
11808
hash_label(const char *label)
11809
{
11810
return our_strhash(label) % LABEL_HASH_SIZE;
11811
}
11812
11813
size_t
11814
hash_ms(const char *msstr)
11815
{
11816
return our_strhash(msstr) % MACHSERVICE_HASH_SIZE;
11817
}
11818
11819
bool
11820
waiting4removal_new(job_t j, mach_port_t rp)
11821
{
11822
struct waiting_for_removal *w4r;
11823
11824
if (!job_assumes(j, (w4r = malloc(sizeof(struct waiting_for_removal))) != NULL)) {
11825
return false;
11826
}
11827
11828
w4r->reply_port = rp;
11829
11830
SLIST_INSERT_HEAD(&j->removal_watchers, w4r, sle);
11831
11832
return true;
11833
}
11834
11835
void
11836
waiting4removal_delete(job_t j, struct waiting_for_removal *w4r)
11837
{
11838
(void)job_assumes_zero(j, job_mig_send_signal_reply(w4r->reply_port, 0));
11839
11840
SLIST_REMOVE(&j->removal_watchers, w4r, waiting_for_removal, sle);
11841
11842
free(w4r);
11843
}
11844
11845
size_t
get_kern_max_proc(void)
{
	// Ask the kernel for its process-count limit (kern.maxproc).
	int name[] = { CTL_KERN, KERN_MAXPROC };
	int maxproc = 100; // fallback if the sysctl somehow fails
	size_t len = sizeof(maxproc);

	(void)posix_assumes_zero(sysctl(name, 2, &maxproc, &len, NULL, 0));

	return maxproc;
}
11856
11857
// See rdar://problem/6271234
11858
void
11859
eliminate_double_reboot(void)
11860
{
11861
if (unlikely(!pid1_magic)) {
11862
return;
11863
}
11864
11865
struct stat sb;
11866
const char *argv[] = { _PATH_BSHELL, "/etc/rc.deferred_install", NULL };
11867
int result = -1;
11868
11869
if (unlikely(stat(argv[1], &sb) != -1)) {
11870
jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Going to run deferred install script.");
11871
11872
pid_t p = 0;
11873
result = posix_spawnp(&p, argv[0], NULL, NULL, (char **)argv, environ);
11874
if (result == -1) {
11875
jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Couldn't run deferred install script: %d: %s", result, strerror(result));
11876
goto out;
11877
}
11878
11879
int wstatus = 0;
11880
result = waitpid(p, &wstatus, 0);
11881
if (result == -1) {
11882
jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Failed to reap deferred install script: %d: %s", errno, strerror(errno));
11883
goto out;
11884
}
11885
11886
if (WIFEXITED(wstatus)) {
11887
if ((result = WEXITSTATUS(wstatus)) == 0) {
11888
jobmgr_log(root_jobmgr, LOG_DEBUG | LOG_CONSOLE, "Deferred install script completed successfully.");
11889
} else {
11890
jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Deferred install script failed with status: %d", WEXITSTATUS(wstatus));
11891
}
11892
} else {
11893
jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Weirdness with install script: %d", wstatus);
11894
}
11895
}
11896
out:
11897
if (result == 0) {
11898
/* If the unlink(2) was to fail, it would be most likely fail with
11899
* EBUSY. All the other failure cases for unlink(2) don't apply when
11900
* we're running under PID 1 and have verified that the file exists.
11901
* Outside of someone deliberately messing with us (like if
11902
* /etc/rc.deferredinstall is actually a looping sym-link or a mount
11903
* point for a filesystem) and I/O errors, we should be good.
11904
*/
11905
if (unlink(argv[1]) == -1) {
11906
jobmgr_log(root_jobmgr, LOG_WARNING | LOG_CONSOLE, "Failed to remove deferred install script: %d: %s", errno, strerror(errno));
11907
}
11908
}
11909
}
11910
11911
/* Apply one key/value pair from a Jetsam-properties dictionary to job j.
 * Unrecognized keys are logged; several SpringBoard-only keys are accepted
 * and deliberately ignored. Marks the job as having Jetsam properties.
 */
void
jetsam_property_setup(launch_data_t obj, const char *key, job_t j)
{
	job_log(j, LOG_DEBUG, "Setting Jetsam properties for job...");
	if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMPRIORITY) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
		j->jetsam_priority = (typeof(j->jetsam_priority))launch_data_get_integer(obj);

#if XPC_LPI_VERSION >= 20120810
		// Priorities in the reserved XPC band range are translated through
		// launchd's band-to-priority map.
		if (j->jetsam_priority > XPC_JETSAM_PRIORITY_RESERVED && j->jetsam_priority < XPC_JETSAM_PRIORITY_RESERVED + XPC_JETSAM_BAND_LAST) {
			size_t band = j->jetsam_priority - XPC_JETSAM_PRIORITY_RESERVED;
			j->jetsam_priority = _launchd_priority_map[band - 1].priority;
		}
#endif
		job_log(j, LOG_DEBUG, "Priority: %d", j->jetsam_priority);
	} else if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMIT) == 0 && launch_data_get_type(obj) == LAUNCH_DATA_INTEGER) {
		j->jetsam_memlimit = (typeof(j->jetsam_memlimit))launch_data_get_integer(obj);
		job_log(j, LOG_DEBUG, "Memory limit: %d", j->jetsam_memlimit);
	} else if (strcasecmp(key, LAUNCH_JOBKEY_JETSAMMEMORYLIMITBACKGROUND) == 0) {
		j->jetsam_memory_limit_background = true;
		job_log(j, LOG_DEBUG, "Memory limit is for background state only");
	} else if (strcasecmp(key, LAUNCH_KEY_JETSAMFRONTMOST) == 0) {
		/* Ignore. We only recognize this key so we don't complain when we get SpringBoard's request.
		 * You can't set this in a plist.
		 */
	} else if (strcasecmp(key, LAUNCH_KEY_JETSAMACTIVE) == 0) {
		// Ignore.
	} else if (strcasecmp(key, LAUNCH_KEY_JETSAMLABEL) == 0) {
		/* Ignore. This key is present in SpringBoard's request dictionary, so we don't want to
		 * complain about it.
		 */
	} else {
		job_log(j, LOG_ERR, "Unknown Jetsam key: %s", key);
	}

	// Any call here — even for an ignored key — flags the job as managed by Jetsam.
	if (unlikely(!j->jetsam_properties)) {
		j->jetsam_properties = true;
	}
}
11949
11950
void
11951
job_update_jetsam_properties(job_t j, xpc_jetsam_band_t band, uint64_t user_data)
11952
{
11953
#if TARGET_OS_EMBEDDED
11954
j->jetsam_priority = _launchd_priority_map[band - 1].priority;
11955
j->jetsam_properties = true;
11956
11957
memorystatus_priority_properties_t mjp;
11958
mjp.priority = j->jetsam_priority;
11959
mjp.user_data = user_data;
11960
11961
size_t size = sizeof(mjp);
11962
int r = memorystatus_control(MEMORYSTATUS_CMD_SET_PRIORITY_PROPERTIES, j->p, 0, &mjp, size);
11963
if (r == -1 && errno != ESRCH) {
11964
(void)job_assumes_zero(j, errno);
11965
}
11966
#else
11967
#pragma unused(j, band, user_data)
11968
#endif
11969
}
11970
11971
void
11972
job_update_jetsam_memory_limit(job_t j, int32_t limit)
11973
{
11974
#if TARGET_OS_EMBEDDED
11975
j->jetsam_memlimit = limit;
11976
j->jetsam_properties = true;
11977
11978
int r = memorystatus_control(MEMORYSTATUS_CMD_SET_JETSAM_HIGH_WATER_MARK, j->p, limit, NULL, 0);
11979
if (r == -1 && errno != ESRCH) {
11980
(void)job_assumes_zero(j, errno);
11981
}
11982
#else
11983
#pragma unused(j, limit)
11984
#endif
11985
}
11986
11987