Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/lib/libc/tests/sys/cpuset_test.c
105661 views
1
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020-2021 Kyle Evans <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
27
28
#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/jail.h>
#include <sys/procdesc.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/wait.h>

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <atf-c.h>
44
45
/* Indices of the parent/child ends of a socketpair(2) descriptor array. */
#define SP_PARENT 0
#define SP_CHILD 1
47
48
/*
 * Snapshot of cpuset state observed inside a jailed child, sent back to
 * the test parent over a socket (see do_jail()).
 */
struct jail_test_info {
	cpuset_t jail_tidmask;		/* Child thread's affinity mask. */
	cpusetid_t jail_cpuset;		/* Root-level set id seen in the jail. */
	cpusetid_t jail_child_cpuset;	/* Child's own base cpuset id. */
};
53
54
/*
 * Parameters passed to jail-test callbacks; the parent-side fields are
 * filled by do_jail_test() before fork, info is filled from the child's
 * report after it exits.
 */
struct jail_test_cb_params {
	struct jail_test_info info;	/* Results reported by the jailed child. */
	cpuset_t mask;			/* Parent's pre-fork affinity mask. */
	cpusetid_t rootid;		/* Parent's root cpuset id. */
	cpusetid_t setid;		/* Parent's base cpuset id. */
};

/* Callback run before fork (prologue) or after child exit (epilogue). */
typedef void (*jail_test_cb)(struct jail_test_cb_params *);
62
63
/* Exit codes a jailed child uses to report failure back to the parent. */
#define FAILURE_JAIL 42
#define FAILURE_MASK 43
#define FAILURE_JAILSET 44
#define FAILURE_PIDSET 45
#define FAILURE_SEND 46
#define FAILURE_DEADLK 47
#define FAILURE_ATTACH 48
#define FAILURE_BADAFFIN 49
#define FAILURE_SUCCESS 50

/*
 * Translate one of the FAILURE_* exit codes above into a human-readable
 * message; returns NULL for any unrecognized code.
 */
static const char *
do_jail_errstr(int error)
{
	/* Codes are contiguous, so a sparse table indexed by code works. */
	static const char *const errstrs[FAILURE_SUCCESS + 1] = {
		[FAILURE_JAIL] = "jail_set(2) failed",
		[FAILURE_MASK] = "Failed to get the thread cpuset mask",
		[FAILURE_JAILSET] = "Failed to get the jail setid",
		[FAILURE_PIDSET] = "Failed to get the pid setid",
		[FAILURE_SEND] = "Failed to send(2) cpuset information",
		[FAILURE_DEADLK] = "Deadlock hit trying to attach to jail",
		[FAILURE_ATTACH] = "jail_attach(2) failed",
		[FAILURE_BADAFFIN] = "Unexpected post-attach affinity",
		[FAILURE_SUCCESS] =
		    "jail_attach(2) succeeded, but should have failed.",
	};

	if (error < FAILURE_JAIL || error > FAILURE_SUCCESS)
		return (NULL);
	return (errstrs[error]);
}
100
101
/*
 * Skip the running test case unless the process's current cpuset contains
 * at least ncpu CPUs; on return, *mask holds that cpuset's affinity mask.
 */
static void
skip_ltncpu(int ncpu, cpuset_t *mask)
{

	CPU_ZERO(mask);
	ATF_REQUIRE_EQ(0, cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID,
	    -1, sizeof(*mask), mask));
	if (CPU_COUNT(mask) < ncpu)
		atf_tc_skip("Test requires %d or more cores.", ncpu);
}
111
112
/*
 * Like skip_ltncpu(), but checks the root cpuset (CPU_LEVEL_ROOT) rather
 * than the process's own set; *mask receives the root set's mask.
 */
static void
skip_ltncpu_root(int ncpu, cpuset_t *mask)
{

	CPU_ZERO(mask);
	ATF_REQUIRE_EQ(0, cpuset_getaffinity(CPU_LEVEL_ROOT, CPU_WHICH_PID,
	    -1, sizeof(*mask), mask));
	if (CPU_COUNT(mask) < ncpu) {
		atf_tc_skip("Test requires cpuset root with %d or more cores.",
		    ncpu);
	}
}
124
125
ATF_TC(newset);
ATF_TC_HEAD(newset, tc)
{
	atf_tc_set_md_var(tc, "descr", "Test cpuset(2)");
}
ATF_TC_BODY(newset, tc)
{
	cpusetid_t nsetid, setid, qsetid;

	/* Obtain our initial set id. */
	ATF_REQUIRE_EQ(0, cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_TID, -1,
	    &setid));

	/* Create a new one; it must differ from the set we started in. */
	ATF_REQUIRE_EQ(0, cpuset(&nsetid));
	ATF_CHECK(nsetid != setid);

	/*
	 * Query id again, make sure it's equal to the one we just got --
	 * i.e. cpuset(2) also switched this thread into the new set.
	 */
	ATF_REQUIRE_EQ(0, cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_TID, -1,
	    &qsetid));
	ATF_CHECK_EQ(nsetid, qsetid);
}
147
148
ATF_TC(transient);
ATF_TC_HEAD(transient, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Test that transient cpusets are freed.");
}
ATF_TC_BODY(transient, tc)
{
	cpusetid_t isetid, scratch, setid;

	/* Remember the set we start in so we can switch back later. */
	ATF_REQUIRE_EQ(0, cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
	    &isetid));

	/* Create a new set; it must be queryable while we belong to it. */
	ATF_REQUIRE_EQ(0, cpuset(&setid));
	ATF_REQUIRE_EQ(0, cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_CPUSET,
	    setid, &scratch));

	/*
	 * Return back to our initial cpuset; the kernel should free the cpuset
	 * we just created.
	 */
	ATF_REQUIRE_EQ(0, cpuset_setid(CPU_WHICH_PID, -1, isetid));
	/* Querying the now-unreferenced set must fail with ESRCH. */
	ATF_REQUIRE_EQ(-1, cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_CPUSET,
	    setid, &scratch));
	ATF_CHECK_EQ(ESRCH, errno);
}
174
175
ATF_TC(deadlk);
ATF_TC_HEAD(deadlk, tc)
{
	atf_tc_set_md_var(tc, "descr", "Test against disjoint cpusets.");
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(deadlk, tc)
{
	cpusetid_t setid;
	cpuset_t dismask, mask, omask;
	int fcpu, i, found, ncpu, second;

	/* Make sure we have 3 cpus, so we test partial overlap. */
	skip_ltncpu(3, &omask);

	ATF_REQUIRE_EQ(0, cpuset(&setid));
	CPU_ZERO(&mask);
	CPU_ZERO(&dismask);
	CPU_COPY(&omask, &mask);
	CPU_COPY(&omask, &dismask);
	fcpu = CPU_FFS(&mask);
	ncpu = CPU_COUNT(&mask);

	/*
	 * Turn off all but the first two for mask, turn off the first for
	 * dismask and turn them all off for both after the third.  Note that
	 * `second` is always assigned: skip_ltncpu(3, ...) above guarantees
	 * at least three set bits, so the found == 2 branch is reached.
	 */
	for (i = fcpu - 1, found = 0; i < CPU_MAXSIZE && found != ncpu; i++) {
		if (CPU_ISSET(i, &omask)) {
			found++;
			if (found == 1) {
				CPU_CLR(i, &dismask);
			} else if (found == 2) {
				second = i;
			} else if (found >= 3) {
				CPU_CLR(i, &mask);
				if (found > 3)
					CPU_CLR(i, &dismask);
			}
		}
	}

	ATF_REQUIRE_EQ(0, cpuset_setaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID,
	    -1, sizeof(mask), &mask));

	/* Must be a strict subset! */
	ATF_REQUIRE_EQ(-1, cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    -1, sizeof(dismask), &dismask));
	ATF_REQUIRE_EQ(EINVAL, errno);

	/*
	 * We'll set our anonymous set to the 0,1 set that currently matches
	 * the process.  If we then set the process to the 1,2 set that's in
	 * dismask, we should then personally be restricted down to the single
	 * overlapping CPU.
	 */
	ATF_REQUIRE_EQ(0, cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    -1, sizeof(mask), &mask));
	ATF_REQUIRE_EQ(0, cpuset_setaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID,
	    -1, sizeof(dismask), &dismask));
	ATF_REQUIRE_EQ(0, cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    -1, sizeof(mask), &mask));
	ATF_REQUIRE_EQ(1, CPU_COUNT(&mask));
	ATF_REQUIRE(CPU_ISSET(second, &mask));

	/*
	 * Finally, clearing the overlap and attempting to set the process
	 * cpuset to a completely disjoint mask should fail, because this
	 * process will then not have anything to run on.
	 */
	CPU_CLR(second, &dismask);
	ATF_REQUIRE_EQ(-1, cpuset_setaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID,
	    -1, sizeof(dismask), &dismask));
	ATF_REQUIRE_EQ(EDEADLK, errno);
}
250
251
static int
252
create_jail(void)
253
{
254
struct iovec iov[2];
255
char *name;
256
int error;
257
258
if (asprintf(&name, "cpuset_%d", getpid()) == -1)
259
_exit(42);
260
261
iov[0].iov_base = "name";
262
iov[0].iov_len = 5;
263
264
iov[1].iov_base = name;
265
iov[1].iov_len = strlen(name) + 1;
266
267
error = jail_set(iov, 2, JAIL_CREATE | JAIL_ATTACH);
268
free(name);
269
if (error < 0)
270
return (FAILURE_JAIL);
271
return (0);
272
}
273
274
/*
 * Child-side worker: jail ourselves, collect our post-attach cpuset state
 * (thread mask, root set id, base set id) into a jail_test_info, and send
 * it to the parent over sock.  Returns 0 or a FAILURE_* code suitable for
 * _exit(); the parent decodes it with do_jail_errstr().
 */
static int
do_jail(int sock)
{
	struct jail_test_info info;
	int error;

	error = create_jail();
	if (error != 0)
		return (error);

	/* Record parameters, kick them over, then make a swift exit. */
	CPU_ZERO(&info.jail_tidmask);
	error = cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    -1, sizeof(info.jail_tidmask), &info.jail_tidmask);
	if (error != 0)
		return (FAILURE_MASK);

	error = cpuset_getid(CPU_LEVEL_ROOT, CPU_WHICH_TID, -1,
	    &info.jail_cpuset);
	if (error != 0)
		return (FAILURE_JAILSET);
	error = cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_TID, -1,
	    &info.jail_child_cpuset);
	if (error != 0)
		return (FAILURE_PIDSET);
	if (send(sock, &info, sizeof(info), 0) != sizeof(info))
		return (FAILURE_SEND);
	return (0);
}
303
304
/*
 * Common driver for the jail_attach_* tests.  Requires ncpu CPUs, records
 * the parent's root/base cpuset ids (optionally creating a new base set
 * when newset is true), runs the optional prologue, then forks a child
 * that jails itself via do_jail() and reports its cpuset state back over
 * a socketpair.  After reaping the child, the mandatory epilogue checks
 * the reported state against the parent-side parameters.
 */
static void
do_jail_test(int ncpu, bool newset, jail_test_cb prologue,
    jail_test_cb epilogue)
{
	struct jail_test_cb_params cbp;
	const char *errstr;
	pid_t pid;
	int error, sock, sockpair[2], status;

	memset(&cbp.info, '\0', sizeof(cbp.info));

	skip_ltncpu(ncpu, &cbp.mask);

	ATF_REQUIRE_EQ(0, cpuset_getid(CPU_LEVEL_ROOT, CPU_WHICH_PID, -1,
	    &cbp.rootid));
	if (newset)
		ATF_REQUIRE_EQ(0, cpuset(&cbp.setid));
	else
		ATF_REQUIRE_EQ(0, cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_PID,
		    -1, &cbp.setid));
	/* Special hack for prison0; it uses cpuset 1 as the root. */
	if (cbp.rootid == 0)
		cbp.rootid = 1;

	/* Not every test needs early setup. */
	if (prologue != NULL)
		(*prologue)(&cbp);

	ATF_REQUIRE_EQ(0, socketpair(PF_UNIX, SOCK_STREAM, 0, sockpair));
	ATF_REQUIRE((pid = fork()) != -1);

	if (pid == 0) {
		/* Child */
		close(sockpair[SP_PARENT]);
		sock = sockpair[SP_CHILD];

		_exit(do_jail(sock));
	} else {
		/* Parent */
		sock = sockpair[SP_PARENT];
		close(sockpair[SP_CHILD]);

		/* Reap the child, retrying across signal interruptions. */
		while ((error = waitpid(pid, &status, 0)) == -1 &&
		    errno == EINTR) {
		}

		ATF_REQUIRE_EQ(sizeof(cbp.info), recv(sock, &cbp.info,
		    sizeof(cbp.info), 0));

		/* Sanity check the exit info. */
		ATF_REQUIRE_EQ(pid, error);
		ATF_REQUIRE(WIFEXITED(status));
		if (WEXITSTATUS(status) != 0) {
			errstr = do_jail_errstr(WEXITSTATUS(status));
			if (errstr != NULL)
				atf_tc_fail("%s", errstr);
			else
				atf_tc_fail("Unknown error '%d'",
				    WEXITSTATUS(status));
		}

		/* epilogue is required; all callers pass one. */
		epilogue(&cbp);
	}
}
368
369
static void
370
jail_attach_mutate_pro(struct jail_test_cb_params *cbp)
371
{
372
cpuset_t *mask;
373
int count;
374
375
mask = &cbp->mask;
376
377
/* Knock out the first cpu. */
378
count = CPU_COUNT(mask);
379
CPU_CLR(CPU_FFS(mask) - 1, mask);
380
ATF_REQUIRE_EQ(count - 1, CPU_COUNT(mask));
381
ATF_REQUIRE_EQ(0, cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
382
-1, sizeof(*mask), mask));
383
}
384
385
/*
 * Epilogue for the "new base set" tests: the jail must have received its
 * own root set (distinct from ours and from the child's base set), and
 * the child's thread mask must match the mask we held before fork.
 */
static void
jail_attach_newbase_epi(struct jail_test_cb_params *cbp)
{
	struct jail_test_info *info;
	cpuset_t *mask;

	info = &cbp->info;
	mask = &cbp->mask;

	/*
	 * The rootid test has been thrown in because a bug was discovered
	 * where any newly derived cpuset during attach would be parented to
	 * the wrong cpuset.  Otherwise, we should observe that a new cpuset
	 * has been created for this process.
	 */
	ATF_REQUIRE(info->jail_cpuset != cbp->rootid);
	ATF_REQUIRE(info->jail_cpuset != cbp->setid);
	ATF_REQUIRE(info->jail_cpuset != info->jail_child_cpuset);
	ATF_REQUIRE_EQ(0, CPU_CMP(mask, &info->jail_tidmask));
}
405
406
ATF_TC(jail_attach_newbase);
ATF_TC_HEAD(jail_attach_newbase, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Test jail attachment effect on affinity with a new base cpuset.");
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(jail_attach_newbase, tc)
{

	/* Need >= 2 cpus to test restriction. */
	do_jail_test(2, true, &jail_attach_mutate_pro,
	    &jail_attach_newbase_epi);
}
420
421
ATF_TC(jail_attach_newbase_plain);
ATF_TC_HEAD(jail_attach_newbase_plain, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Test jail attachment effect on affinity with a new, unmodified base cpuset.");
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(jail_attach_newbase_plain, tc)
{

	/* Same as jail_attach_newbase, but with no prologue mask mutation. */
	do_jail_test(2, true, NULL, &jail_attach_newbase_epi);
}
433
434
/*
 * Generic epilogue for tests that are expecting to use the jail's root cpuset
 * with their own mask, whether that's been modified or not.
 */
static void
jail_attach_jset_epi(struct jail_test_cb_params *cbp)
{
	struct jail_test_info *info;
	cpuset_t *mask;

	info = &cbp->info;
	mask = &cbp->mask;

	/* Child's base set should be the jail root, not our pre-fork set. */
	ATF_REQUIRE(info->jail_cpuset != cbp->setid);
	ATF_REQUIRE_EQ(info->jail_cpuset, info->jail_child_cpuset);
	ATF_REQUIRE_EQ(0, CPU_CMP(mask, &info->jail_tidmask));
}
451
452
ATF_TC(jail_attach_prevbase);
ATF_TC_HEAD(jail_attach_prevbase, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Test jail attachment effect on affinity without a new base.");
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(jail_attach_prevbase, tc)
{

	/* newset == false: keep the existing base set across the attach. */
	do_jail_test(2, false, &jail_attach_mutate_pro, &jail_attach_jset_epi);
}
464
465
/*
 * Prologue: the "plain" test only makes sense when the process is still in
 * the root cpuset; skip otherwise.
 */
static void
jail_attach_plain_pro(struct jail_test_cb_params *cbp)
{

	if (cbp->setid != cbp->rootid)
		atf_tc_skip("Must be running with the root cpuset.");
}
472
473
ATF_TC(jail_attach_plain);
ATF_TC_HEAD(jail_attach_plain, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Test jail attachment effect on affinity without specialization.");
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(jail_attach_plain, tc)
{

	/* Only 1 CPU needed; no new set and no mask mutation. */
	do_jail_test(1, false, &jail_attach_plain_pro, &jail_attach_jset_epi);
}
485
486
static int
487
jail_attach_disjoint_newjail(int fd)
488
{
489
struct iovec iov[2];
490
char *name;
491
int jid;
492
493
if (asprintf(&name, "cpuset_%d", getpid()) == -1)
494
_exit(42);
495
496
iov[0].iov_base = "name";
497
iov[0].iov_len = sizeof("name");
498
499
iov[1].iov_base = name;
500
iov[1].iov_len = strlen(name) + 1;
501
502
if ((jid = jail_set(iov, 2, JAIL_CREATE | JAIL_ATTACH)) < 0)
503
return (FAILURE_JAIL);
504
505
/* Signal that we're ready. */
506
write(fd, &jid, sizeof(jid));
507
for (;;) {
508
/* Spin */
509
}
510
}
511
512
/*
 * Parent-side helper: wait (up to 5 seconds) for the jail-creating child
 * to report its jail id on fd.  Also selects on the child's procdesc pfd
 * so a dead child fails the test immediately instead of timing out.
 * Returns the jail id read from fd.
 */
static int
wait_jail(int fd, int pfd)
{
	fd_set lset;
	struct timeval tv;
	int error, jid, maxfd;

	FD_ZERO(&lset);
	FD_SET(fd, &lset);
	FD_SET(pfd, &lset);

	maxfd = MAX(fd, pfd);

	tv.tv_sec = 5;
	tv.tv_usec = 0;

	/* Wait for jid to be written. */
	do {
		error = select(maxfd + 1, &lset, NULL, NULL, &tv);
	} while (error == -1 && errno == EINTR);

	if (error == 0) {
		atf_tc_fail("Jail creator did not respond in time.");
	}

	ATF_REQUIRE_MSG(error > 0, "Unexpected error %d from select()", errno);

	if (FD_ISSET(pfd, &lset)) {
		/* Process died */
		atf_tc_fail("Jail creator died unexpectedly.");
	}

	ATF_REQUIRE(FD_ISSET(fd, &lset));
	ATF_REQUIRE_EQ(sizeof(jid), recv(fd, &jid, sizeof(jid), 0));

	return (jid);
}
549
550
/*
 * Child-side attach attempt: jail_attach(2) to jid, then compare our
 * resulting cpuset mask against *expected_mask.  A NULL expected_mask
 * means the attach itself is expected to fail, so success is an error
 * (FAILURE_SUCCESS).  Returns 0 or a FAILURE_* exit code.
 */
static int
try_attach_child(int jid, cpuset_t *expected_mask)
{
	cpuset_t mask;

	if (jail_attach(jid) == -1) {
		/* Distinguish the disjoint-set deadlock from other errors. */
		if (errno == EDEADLK)
			return (FAILURE_DEADLK);
		return (FAILURE_ATTACH);
	}

	if (expected_mask == NULL)
		return (FAILURE_SUCCESS);

	/* If we had an expected mask, check it against the new process mask. */
	CPU_ZERO(&mask);
	if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID,
	    -1, sizeof(mask), &mask) != 0) {
		return (FAILURE_MASK);
	}

	if (CPU_CMP(expected_mask, &mask) != 0)
		return (FAILURE_BADAFFIN);

	return (0);
}
576
577
static void
578
try_attach(int jid, cpuset_t *expected_mask)
579
{
580
const char *errstr;
581
pid_t pid;
582
int error, fail, status;
583
584
ATF_REQUIRE(expected_mask != NULL);
585
ATF_REQUIRE((pid = fork()) != -1);
586
if (pid == 0)
587
_exit(try_attach_child(jid, expected_mask));
588
589
while ((error = waitpid(pid, &status, 0)) == -1 && errno == EINTR) {
590
/* Try again. */
591
}
592
593
/* Sanity check the exit info. */
594
ATF_REQUIRE_EQ(pid, error);
595
ATF_REQUIRE(WIFEXITED(status));
596
if ((fail = WEXITSTATUS(status)) != 0) {
597
errstr = do_jail_errstr(fail);
598
if (errstr != NULL)
599
atf_tc_fail("%s", errstr);
600
else
601
atf_tc_fail("Unknown error '%d'", WEXITSTATUS(status));
602
}
603
}
604
605
ATF_TC(jail_attach_disjoint);
ATF_TC_HEAD(jail_attach_disjoint, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Test root attachment into completely disjoint jail cpuset.");
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(jail_attach_disjoint, tc)
{
	cpuset_t smask, jmask;
	int sockpair[2];
	cpusetid_t setid;
	pid_t pid;
	int fcpu, jid, pfd, sock, scpu;

	ATF_REQUIRE_EQ(0, cpuset(&setid));

	skip_ltncpu(2, &jmask);
	fcpu = CPU_FFS(&jmask) - 1;
	ATF_REQUIRE_EQ(0, socketpair(PF_UNIX, SOCK_STREAM, 0, sockpair));

	/* We'll wait on the procdesc, too, so we can fail faster if it dies. */
	ATF_REQUIRE((pid = pdfork(&pfd, 0)) != -1);

	if (pid == 0) {
		/* First child sets up the jail. */
		sock = sockpair[SP_CHILD];
		close(sockpair[SP_PARENT]);

		_exit(jail_attach_disjoint_newjail(sock));
	}

	close(sockpair[SP_CHILD]);
	sock = sockpair[SP_PARENT];

	ATF_REQUIRE((jid = wait_jail(sock, pfd)) > 0);

	/*
	 * This process will be clamped down to the first cpu, while the jail
	 * will simply have the first CPU removed to make it a completely
	 * disjoint operation.
	 */
	CPU_ZERO(&smask);
	CPU_SET(fcpu, &smask);
	CPU_CLR(fcpu, &jmask);

	/*
	 * We'll test with the first and second cpu set as well.  Only the
	 * second cpu should be used.
	 */
	scpu = CPU_FFS(&jmask) - 1;

	ATF_REQUIRE_EQ(0, cpuset_setaffinity(CPU_LEVEL_ROOT, CPU_WHICH_JAIL,
	    jid, sizeof(jmask), &jmask));
	ATF_REQUIRE_EQ(0, cpuset_setaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_CPUSET,
	    setid, sizeof(smask), &smask));

	/* Fully disjoint: the attach should leave us with the jail's mask. */
	try_attach(jid, &jmask);

	CPU_SET(scpu, &smask);
	ATF_REQUIRE_EQ(0, cpuset_setaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_CPUSET,
	    setid, sizeof(smask), &smask));

	/* Partial overlap: only the overlapping (second) CPU should remain. */
	CPU_CLR(fcpu, &smask);
	try_attach(jid, &smask);
}
671
672
/* sysconf(3) processor counts sampled by the jailed child in jail_nproc. */
struct nproc_info {
	long nproc_init;	/* _SC_NPROCESSORS_ONLN just after jailing. */
	long nproc_final;	/* _SC_NPROCESSORS_ONLN after the parent
				   restricted the jail's root set. */
	long nproc_global;	/* _SC_NPROCESSORS_CONF at the same point. */
};
677
678
ATF_TC(jail_nproc);
ATF_TC_HEAD(jail_nproc, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Test that _SC_PROCESSORS_ONLN reflects jail cpuset constraints");
}
ATF_TC_BODY(jail_nproc, tc)
{
	cpuset_t jmask;
	struct nproc_info ninfo = { };
	int sockpair[2];
	cpusetid_t setid;
	ssize_t readsz;
	pid_t pid;
	int fcpu, error, pfd, sock;
	char okb = 0x7f, rcvb;

	skip_ltncpu_root(2, &jmask);
	fcpu = CPU_FFS(&jmask) - 1;

	/*
	 * Just adjusting our affinity should not affect the number of
	 * processors considered online- we want to be sure that it's only
	 * adjusted if our jail's root set is.
	 */
	CPU_CLR(fcpu, &jmask);
	error = cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
	    sizeof(jmask), &jmask);
	ATF_REQUIRE_EQ(0, error);
	ATF_REQUIRE(sysconf(_SC_NPROCESSORS_ONLN) > CPU_COUNT(&jmask));

	ATF_REQUIRE_EQ(0, socketpair(PF_UNIX, SOCK_STREAM, 0, sockpair));

	/* We'll wait on the procdesc, too, so we can fail faster if it dies. */
	ATF_REQUIRE((pid = pdfork(&pfd, 0)) != -1);

	if (pid == 0) {
		/* First child sets up the jail. */
		sock = sockpair[SP_CHILD];
		close(sockpair[SP_PARENT]);

		error = create_jail();
		if (error != 0)
			_exit(error);

		/* Baseline online-CPU count inside the fresh jail. */
		ninfo.nproc_init = sysconf(_SC_NPROCESSORS_ONLN);

		/* Signal the parent that we're jailed. */
		readsz = write(sock, &okb, sizeof(okb));
		assert(readsz == sizeof(okb));

		/* Wait for parent to adjust our mask and signal OK. */
		readsz = read(sock, &rcvb, sizeof(rcvb));
		assert(readsz == sizeof(rcvb));
		assert(rcvb == okb);

		/* Re-sample both counts after the parent's restriction. */
		ninfo.nproc_final = sysconf(_SC_NPROCESSORS_ONLN);
		ninfo.nproc_global = sysconf(_SC_NPROCESSORS_CONF);
		readsz = write(sock, &ninfo, sizeof(ninfo));
		assert(readsz == sizeof(ninfo));

		_exit(0);
	}

	close(sockpair[SP_CHILD]);
	sock = sockpair[SP_PARENT];

	/* Wait for signal that they are jailed. */
	readsz = read(sock, &rcvb, sizeof(rcvb));
	assert(readsz == sizeof(rcvb));
	assert(rcvb == okb);

	/* Grab the cpuset id and adjust it. */
	error = cpuset_getid(CPU_LEVEL_ROOT, CPU_WHICH_PID, pid, &setid);
	ATF_REQUIRE_EQ(0, error);
	error = cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_CPUSET,
	    setid, sizeof(jmask), &jmask);
	ATF_REQUIRE_EQ(0, error);

	/* Signal OK to proceed. */
	readsz = write(sock, &okb, sizeof(okb));
	ATF_REQUIRE_EQ(sizeof(okb), readsz);

	/* Grab our final nproc info. */
	readsz = read(sock, &ninfo, sizeof(ninfo));
	ATF_REQUIRE_EQ(sizeof(ninfo), readsz);

	/*
	 * We set our own affinity to jmask, which is derived from *our* root
	 * set, at the beginning of the test.  The jail would inherit from this
	 * set, so we just re-use that mask here to confirm that
	 * _SC_NPROCESSORS_ONLN did actually drop in response to us limiting
	 * the jail, and that its _SC_NPROCESSORS_CONF did not.  jmask has one
	 * CPU cleared, hence the +1 for the unrestricted counts.
	 */
	ATF_REQUIRE_EQ(CPU_COUNT(&jmask) + 1, ninfo.nproc_init);
	ATF_REQUIRE_EQ(CPU_COUNT(&jmask) + 1, ninfo.nproc_global);
	ATF_REQUIRE_EQ(CPU_COUNT(&jmask), ninfo.nproc_final);
}
776
777
ATF_TC(badparent);
ATF_TC_HEAD(badparent, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Test parent assignment when assigning a new cpuset.");
}
ATF_TC_BODY(badparent, tc)
{
	cpuset_t mask;
	cpusetid_t finalsetid, origsetid, setid;

	/* Need to mask off at least one CPU. */
	skip_ltncpu(2, &mask);

	ATF_REQUIRE_EQ(0, cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_TID, -1,
	    &origsetid));

	/* Move into a freshly created set. */
	ATF_REQUIRE_EQ(0, cpuset(&setid));

	/*
	 * Mask off the first CPU, then we'll reparent ourselves to our original
	 * set.
	 */
	CPU_CLR(CPU_FFS(&mask) - 1, &mask);
	ATF_REQUIRE_EQ(0, cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    -1, sizeof(mask), &mask));

	/* Switching back must land us in exactly the original set. */
	ATF_REQUIRE_EQ(0, cpuset_setid(CPU_WHICH_PID, -1, origsetid));
	ATF_REQUIRE_EQ(0, cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_TID, -1,
	    &finalsetid));

	ATF_REQUIRE_EQ(finalsetid, origsetid);
}
810
811
/* Register every test case defined above with the ATF test program. */
ATF_TP_ADD_TCS(tp)
{

	ATF_TP_ADD_TC(tp, newset);
	ATF_TP_ADD_TC(tp, transient);
	ATF_TP_ADD_TC(tp, deadlk);
	ATF_TP_ADD_TC(tp, jail_attach_newbase);
	ATF_TP_ADD_TC(tp, jail_attach_newbase_plain);
	ATF_TP_ADD_TC(tp, jail_attach_prevbase);
	ATF_TP_ADD_TC(tp, jail_attach_plain);
	ATF_TP_ADD_TC(tp, jail_attach_disjoint);
	ATF_TP_ADD_TC(tp, jail_nproc);
	ATF_TP_ADD_TC(tp, badparent);
	return (atf_no_error());
}
826
827