GitHub Repository: torvalds/linux
Path: blob/master/kernel/cgroup/cpuset.c
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* kernel/cpuset.c
4
*
5
* Processor and Memory placement constraints for sets of tasks.
6
*
7
* Copyright (C) 2003 BULL SA.
8
* Copyright (C) 2004-2007 Silicon Graphics, Inc.
9
* Copyright (C) 2006 Google, Inc
10
*
11
* Portions derived from Patrick Mochel's sysfs code.
12
* sysfs is Copyright (c) 2001-3 Patrick Mochel
13
*
14
* 2003-10-10 Written by Simon Derr.
15
* 2003-10-22 Updates by Stephen Hemminger.
16
* 2004 May-July Rework by Paul Jackson.
17
* 2006 Rework by Paul Menage to use generic cgroups
18
* 2008 Rework of the scheduler domains and CPU hotplug handling
19
* by Max Krasnyansky
20
*/
21
#include "cpuset-internal.h"
22
23
#include <linux/init.h>
24
#include <linux/interrupt.h>
25
#include <linux/kernel.h>
26
#include <linux/mempolicy.h>
27
#include <linux/mm.h>
28
#include <linux/memory.h>
29
#include <linux/export.h>
30
#include <linux/rcupdate.h>
31
#include <linux/sched.h>
32
#include <linux/sched/deadline.h>
33
#include <linux/sched/mm.h>
34
#include <linux/sched/task.h>
35
#include <linux/security.h>
36
#include <linux/oom.h>
37
#include <linux/sched/isolation.h>
38
#include <linux/wait.h>
39
#include <linux/workqueue.h>
40
#include <linux/task_work.h>
41
42
DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
43
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
44
45
/*
46
* There could be abnormal cpuset configurations for cpu or memory
47
* node binding, add this key to provide a quick low-cost judgment
48
* of the situation.
49
*/
50
DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
51
52
static const char * const perr_strings[] = {
53
[PERR_INVCPUS] = "Invalid cpu list in cpuset.cpus.exclusive",
54
[PERR_INVPARENT] = "Parent is an invalid partition root",
55
[PERR_NOTPART] = "Parent is not a partition root",
56
[PERR_NOTEXCL] = "Cpu list in cpuset.cpus not exclusive",
57
[PERR_NOCPUS] = "Parent unable to distribute cpu downstream",
58
[PERR_HOTPLUG] = "No cpu available due to hotplug",
59
[PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty",
60
[PERR_HKEEPING] = "partition config conflicts with housekeeping setup",
61
[PERR_ACCESS] = "Enable partition not permitted",
62
[PERR_REMOTE] = "Have remote partition underneath",
63
};
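/*
 * Illustrative, self-contained userspace sketch (not kernel code) of where
 * the strings above surface: on cgroup v2, writing "root" or "isolated" to
 * cpuset.cpus.partition requests a partition, and reading the file back
 * reports e.g. "root" or "root invalid (<reason>)", where <reason> is one
 * of the perr_strings[] messages. The cgroup path below is an assumption
 * made for the example.
 */
#if 0	/* example only, never built as part of the kernel */
#include <stdio.h>

int main(void)
{
	const char *pfile = "/sys/fs/cgroup/test/cpuset.cpus.partition";
	char line[256];
	FILE *f;

	f = fopen(pfile, "w");
	if (f) {
		fputs("root\n", f);	/* ask for a partition root */
		fclose(f);
	}

	f = fopen(pfile, "r");
	if (f) {
		if (fgets(line, sizeof(line), f))
			printf("partition state: %s", line);
		fclose(f);
	}
	return 0;
}
#endif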
64
65
/*
66
* For local partitions, update to subpartitions_cpus & isolated_cpus is done
67
* in update_parent_effective_cpumask(). For remote partitions, it is done in
68
* the remote_partition_*() and remote_cpus_update() helpers.
69
*/
70
/*
71
* Exclusive CPUs distributed out to local or remote sub-partitions of
72
* top_cpuset
73
*/
74
static cpumask_var_t subpartitions_cpus;
75
76
/*
77
* Exclusive CPUs in isolated partitions
78
*/
79
static cpumask_var_t isolated_cpus;
80
81
/*
82
* isolated_cpus updating flag (protected by cpuset_mutex)
83
* Set if isolated_cpus is going to be updated in the current
84
* cpuset_mutex critical section.
85
*/
86
static bool isolated_cpus_updating;
87
88
/*
89
* Housekeeping (HK_TYPE_DOMAIN) CPUs at boot
90
*/
91
static cpumask_var_t boot_hk_cpus;
92
static bool have_boot_isolcpus;
93
94
/*
95
* A flag to force sched domain rebuild at the end of an operation.
96
* It can be set in
97
* - update_partition_sd_lb()
98
* - update_cpumasks_hier()
99
* - cpuset_update_flag()
100
* - cpuset_hotplug_update_tasks()
101
* - cpuset_handle_hotplug()
102
*
103
* Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock.
104
*
105
* Note that update_relax_domain_level() in cpuset-v1.c can still call
106
* rebuild_sched_domains_locked() directly without using this flag.
107
*/
108
static bool force_sd_rebuild;
109
110
/*
111
* Partition root states:
112
*
113
* 0 - member (not a partition root)
114
* 1 - partition root
115
* 2 - partition root without load balancing (isolated)
116
* -1 - invalid partition root
117
* -2 - invalid isolated partition root
118
*
119
* There are 2 types of partitions - local or remote. Local partitions are
120
* those whose parents are partition roots themselves. Setting of
121
* cpuset.cpus.exclusive is optional in setting up local partitions.
122
* Remote partitions are those whose parents are not partition roots. Passing
123
* down exclusive CPUs by setting cpuset.cpus.exclusive along its ancestor
124
* nodes is mandatory in creating a remote partition.
125
*
126
* For simplicity, a local partition can be created under a local or remote
127
* partition but a remote partition cannot have any partition root in its
128
* ancestor chain except the cgroup root.
129
*/
130
#define PRS_MEMBER 0
131
#define PRS_ROOT 1
132
#define PRS_ISOLATED 2
133
#define PRS_INVALID_ROOT -1
134
#define PRS_INVALID_ISOLATED -2
135
136
/*
137
* Temporary cpumasks for working with partitions that are passed among
138
* functions to avoid memory allocation in inner functions.
139
*/
140
struct tmpmasks {
141
cpumask_var_t addmask, delmask; /* For partition root */
142
cpumask_var_t new_cpus; /* For update_cpumasks_hier() */
143
};
144
145
void inc_dl_tasks_cs(struct task_struct *p)
146
{
147
struct cpuset *cs = task_cs(p);
148
149
cs->nr_deadline_tasks++;
150
}
151
152
void dec_dl_tasks_cs(struct task_struct *p)
153
{
154
struct cpuset *cs = task_cs(p);
155
156
cs->nr_deadline_tasks--;
157
}
158
159
static inline bool is_partition_valid(const struct cpuset *cs)
160
{
161
return cs->partition_root_state > 0;
162
}
163
164
static inline bool is_partition_invalid(const struct cpuset *cs)
165
{
166
return cs->partition_root_state < 0;
167
}
168
169
static inline bool cs_is_member(const struct cpuset *cs)
170
{
171
return cs->partition_root_state == PRS_MEMBER;
172
}
173
174
/*
175
* Callers should hold callback_lock to modify partition_root_state.
176
*/
177
static inline void make_partition_invalid(struct cpuset *cs)
178
{
179
if (cs->partition_root_state > 0)
180
cs->partition_root_state = -cs->partition_root_state;
181
}
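/*
 * Illustrative sketch only (a hypothetical helper, not in this file): the
 * PRS_* values are chosen so a valid state and its invalid counterpart
 * differ only in sign (PRS_ROOT 1 <-> PRS_INVALID_ROOT -1, PRS_ISOLATED 2
 * <-> PRS_INVALID_ISOLATED -2). Re-validating a partition would therefore
 * just mirror make_partition_invalid() above, again with callback_lock
 * held.
 */
static inline void example_make_partition_valid(struct cpuset *cs)
{
	if (cs->partition_root_state < 0)
		cs->partition_root_state = -cs->partition_root_state;
}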
182
183
/*
184
* Send a notification event whenever partition_root_state changes.
185
*/
186
static inline void notify_partition_change(struct cpuset *cs, int old_prs)
187
{
188
if (old_prs == cs->partition_root_state)
189
return;
190
cgroup_file_notify(&cs->partition_file);
191
192
/* Reset prs_err if not invalid */
193
if (is_partition_valid(cs))
194
WRITE_ONCE(cs->prs_err, PERR_NONE);
195
}
196
197
/*
198
* The top_cpuset is always synchronized to cpu_active_mask and we should avoid
199
* using cpu_online_mask as much as possible. An active CPU is always an online
200
* CPU, but not vice versa. cpu_active_mask and cpu_online_mask can differ
201
* during hotplug operations. A CPU is marked active at the last stage of CPU
202
* bringup (CPUHP_AP_ACTIVE). It is also the stage where cpuset hotplug code
203
* will be called to update the sched domains so that the scheduler can move
204
* a normal task to a newly active CPU or remove tasks away from a newly
205
* inactivated CPU. The online bit is set much earlier in the CPU bringup
206
* process and cleared much later in CPU teardown.
207
*
208
* If cpu_online_mask is used while a hotunplug operation is happening in
209
* parallel, we may leave an offline CPU in cpus_allowed or some other masks.
210
*/
211
static struct cpuset top_cpuset = {
212
.flags = BIT(CS_CPU_EXCLUSIVE) |
213
BIT(CS_MEM_EXCLUSIVE) | BIT(CS_SCHED_LOAD_BALANCE),
214
.partition_root_state = PRS_ROOT,
215
.relax_domain_level = -1,
216
.remote_partition = false,
217
};
218
219
/*
220
* There are two global locks guarding cpuset structures - cpuset_mutex and
221
* callback_lock. The cpuset code uses only cpuset_mutex. Other kernel
222
* subsystems can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
223
* structures. Note that cpuset_mutex needs to be a mutex as it is used in
224
* paths that rely on priority inheritance (e.g. scheduler - on RT) for
225
* correctness.
226
*
227
* A task must hold both locks to modify cpusets. If a task holds
228
* cpuset_mutex, it blocks others, ensuring that it is the only task able to
229
* also acquire callback_lock and be able to modify cpusets. It can perform
230
* various checks on the cpuset structure first, knowing nothing will change.
231
* It can also allocate memory while just holding cpuset_mutex. While it is
232
* performing these checks, various callback routines can briefly acquire
233
* callback_lock to query cpusets. Once it is ready to make the changes, it
234
* takes callback_lock, blocking everyone else.
235
*
236
* Calls to the kernel memory allocator can not be made while holding
237
* callback_lock, as that would risk double tripping on callback_lock
238
* from one of the callbacks into the cpuset code from within
239
* __alloc_pages().
240
*
241
* If a task is only holding callback_lock, then it has read-only
242
* access to cpusets.
243
*
244
* Now, the task_struct fields mems_allowed and mempolicy may be changed
245
* by another task; we use alloc_lock in the task_struct fields to protect
246
* them.
247
*
248
* The cpuset_common_seq_show() handlers only hold callback_lock across
249
* small pieces of code, such as when reading out possibly multi-word
250
* cpumasks and nodemasks.
251
*/
252
253
static DEFINE_MUTEX(cpuset_mutex);
254
255
/**
256
* cpuset_lock - Acquire the global cpuset mutex
257
*
258
* This locks the global cpuset mutex to prevent modifications to cpuset
259
* hierarchy and configurations. This helper alone is not enough to make modifications.
260
*/
261
void cpuset_lock(void)
262
{
263
mutex_lock(&cpuset_mutex);
264
}
265
266
void cpuset_unlock(void)
267
{
268
mutex_unlock(&cpuset_mutex);
269
}
270
271
/**
272
* cpuset_full_lock - Acquire full protection for cpuset modification
273
*
274
* Takes both CPU hotplug read lock (cpus_read_lock()) and cpuset mutex
275
* to safely modify cpuset data.
276
*/
277
void cpuset_full_lock(void)
278
{
279
cpus_read_lock();
280
mutex_lock(&cpuset_mutex);
281
}
282
283
void cpuset_full_unlock(void)
284
{
285
mutex_unlock(&cpuset_mutex);
286
cpus_read_unlock();
287
}
288
289
static DEFINE_SPINLOCK(callback_lock);
290
291
void cpuset_callback_lock_irq(void)
292
{
293
spin_lock_irq(&callback_lock);
294
}
295
296
void cpuset_callback_unlock_irq(void)
297
{
298
spin_unlock_irq(&callback_lock);
299
}
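/*
 * Illustrative sketch only (an assumed write path, not in this file): the
 * two-lock protocol described above. cpuset_full_lock() pins CPU hotplug
 * and takes cpuset_mutex for the slow checks and allocations;
 * callback_lock is held only around the short section that publishes the
 * change.
 */
static void example_modify_cpuset(struct cpuset *cs, int new_prs)
{
	cpuset_full_lock();

	/* validation and memory allocation may happen here */

	spin_lock_irq(&callback_lock);
	cs->partition_root_state = new_prs;	/* publish under callback_lock */
	spin_unlock_irq(&callback_lock);

	cpuset_full_unlock();
}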
300
301
static struct workqueue_struct *cpuset_migrate_mm_wq;
302
303
static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
304
305
static inline void check_insane_mems_config(nodemask_t *nodes)
306
{
307
if (!cpusets_insane_config() &&
308
movable_only_nodes(nodes)) {
309
static_branch_enable_cpuslocked(&cpusets_insane_config_key);
310
pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
311
"Cpuset allocations might fail even with a lot of memory available.\n",
312
nodemask_pr_args(nodes));
313
}
314
}
315
316
/*
317
* decrease cs->attach_in_progress.
318
* wake_up cpuset_attach_wq if cs->attach_in_progress==0.
319
*/
320
static inline void dec_attach_in_progress_locked(struct cpuset *cs)
321
{
322
lockdep_assert_held(&cpuset_mutex);
323
324
cs->attach_in_progress--;
325
if (!cs->attach_in_progress)
326
wake_up(&cpuset_attach_wq);
327
}
328
329
static inline void dec_attach_in_progress(struct cpuset *cs)
330
{
331
mutex_lock(&cpuset_mutex);
332
dec_attach_in_progress_locked(cs);
333
mutex_unlock(&cpuset_mutex);
334
}
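/*
 * Illustrative sketch only (a hypothetical helper, not in this file): the
 * matching wait side of the attach_in_progress protocol. Code that must
 * not run concurrently with an in-flight attach can sleep on
 * cpuset_attach_wq until the counter drops back to zero.
 */
static void example_wait_for_attach_done(struct cpuset *cs)
{
	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
}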
335
336
static inline bool cpuset_v2(void)
337
{
338
return !IS_ENABLED(CONFIG_CPUSETS_V1) ||
339
cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
340
}
341
342
/*
343
* Cgroup v2 behavior is used on the "cpus" and "mems" control files when
344
* on default hierarchy or when the cpuset_v2_mode flag is set by mounting
345
* the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
346
* With v2 behavior, "cpus" and "mems" are always what the users have
347
* requested and won't be changed by hotplug events. Only the effective
348
* cpus or mems will be affected.
349
*/
350
static inline bool is_in_v2_mode(void)
351
{
352
return cpuset_v2() ||
353
(cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
354
}
355
356
static inline bool cpuset_is_populated(struct cpuset *cs)
357
{
358
lockdep_assert_held(&cpuset_mutex);
359
360
/* Cpusets in the process of attaching should be considered as populated */
361
return cgroup_is_populated(cs->css.cgroup) ||
362
cs->attach_in_progress;
363
}
364
365
/**
366
* partition_is_populated - check if partition has tasks
367
* @cs: partition root to be checked
368
* @excluded_child: a child cpuset to be excluded in task checking
369
* Return: true if there are tasks, false otherwise
370
*
371
* @cs should be a valid partition root or going to become a partition root.
372
* @excluded_child should be non-NULL when this cpuset is going to become a
373
* partition itself.
374
*
375
* Note that a remote partition is not allowed underneath a valid local
376
* or remote partition. So if a non-partition root child is populated,
377
* the whole partition is considered populated.
378
*/
379
static inline bool partition_is_populated(struct cpuset *cs,
380
struct cpuset *excluded_child)
381
{
382
struct cpuset *cp;
383
struct cgroup_subsys_state *pos_css;
384
385
/*
386
* We cannot call cpuset_is_populated(cs) directly, as
387
* nr_populated_domain_children may include populated
388
* csets from descendants that are partitions.
389
*/
390
if (cs->css.cgroup->nr_populated_csets ||
391
cs->attach_in_progress)
392
return true;
393
394
rcu_read_lock();
395
cpuset_for_each_descendant_pre(cp, pos_css, cs) {
396
if (cp == cs || cp == excluded_child)
397
continue;
398
399
if (is_partition_valid(cp)) {
400
pos_css = css_rightmost_descendant(pos_css);
401
continue;
402
}
403
404
if (cpuset_is_populated(cp)) {
405
rcu_read_unlock();
406
return true;
407
}
408
}
409
rcu_read_unlock();
410
return false;
411
}
412
413
/*
414
* Return in pmask the portion of a task's cpuset's cpus_allowed that
415
* are online and are capable of running the task. If none are found,
416
* walk up the cpuset hierarchy until we find one that does have some
417
* appropriate cpus.
418
*
419
* One way or another, we guarantee to return some non-empty subset
420
* of cpu_active_mask.
421
*
422
* Call with callback_lock or cpuset_mutex held.
423
*/
424
static void guarantee_active_cpus(struct task_struct *tsk,
425
struct cpumask *pmask)
426
{
427
const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
428
struct cpuset *cs;
429
430
if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_active_mask)))
431
cpumask_copy(pmask, cpu_active_mask);
432
433
rcu_read_lock();
434
cs = task_cs(tsk);
435
436
while (!cpumask_intersects(cs->effective_cpus, pmask))
437
cs = parent_cs(cs);
438
439
cpumask_and(pmask, pmask, cs->effective_cpus);
440
rcu_read_unlock();
441
}
442
443
/*
444
* Return in *pmask the portion of a cpuset's mems_allowed that
445
* are online, with memory. If none are online with memory, walk
446
* up the cpuset hierarchy until we find one that does have some
447
* online mems. The top cpuset always has some mems online.
448
*
449
* One way or another, we guarantee to return some non-empty subset
450
* of node_states[N_MEMORY].
451
*
452
* Call with callback_lock or cpuset_mutex held.
453
*/
454
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
455
{
456
while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
457
cs = parent_cs(cs);
458
nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
459
}
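/*
 * Illustrative sketch only (a hypothetical caller, not in this file):
 * deriving usable placement masks for a task via the guarantee_* helpers
 * above. As their comments state, callback_lock or cpuset_mutex must be
 * held, and both results are never empty.
 */
static void example_task_placement(struct task_struct *tsk,
				   struct cpumask *cpus, nodemask_t *mems)
{
	guarantee_active_cpus(tsk, cpus);
	guarantee_online_mems(task_cs(tsk), mems);
}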
460
461
/**
462
* alloc_cpumasks - Allocate an array of cpumask variables
463
* @pmasks: Pointer to array of cpumask_var_t pointers
464
* @size: Number of cpumasks to allocate
465
* Return: 0 if successful, -ENOMEM otherwise.
466
*
467
* Allocates @size cpumasks and initializes them to empty. Returns 0 on
468
* success, -ENOMEM on allocation failure. On failure, any previously
469
* allocated cpumasks are freed.
470
*/
471
static inline int alloc_cpumasks(cpumask_var_t *pmasks[], u32 size)
472
{
473
int i;
474
475
for (i = 0; i < size; i++) {
476
if (!zalloc_cpumask_var(pmasks[i], GFP_KERNEL)) {
477
while (--i >= 0)
478
free_cpumask_var(*pmasks[i]);
479
return -ENOMEM;
480
}
481
}
482
return 0;
483
}
484
485
/**
486
* alloc_tmpmasks - Allocate temporary cpumasks for cpuset operations.
487
* @tmp: Pointer to tmpmasks structure to populate
488
* Return: 0 on success, -ENOMEM on allocation failure
489
*/
490
static inline int alloc_tmpmasks(struct tmpmasks *tmp)
491
{
492
/*
493
* Array of pointers to the three cpumask_var_t fields in tmpmasks.
494
* Note: Array size must match actual number of masks (3)
495
*/
496
cpumask_var_t *pmask[3] = {
497
&tmp->new_cpus,
498
&tmp->addmask,
499
&tmp->delmask
500
};
501
502
return alloc_cpumasks(pmask, ARRAY_SIZE(pmask));
503
}
504
505
/**
506
* free_tmpmasks - free cpumasks in a tmpmasks structure
507
* @tmp: the tmpmasks structure pointer
508
*/
509
static inline void free_tmpmasks(struct tmpmasks *tmp)
510
{
511
if (!tmp)
512
return;
513
514
free_cpumask_var(tmp->new_cpus);
515
free_cpumask_var(tmp->addmask);
516
free_cpumask_var(tmp->delmask);
517
}
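/*
 * Illustrative sketch only (a hypothetical caller, not in this file): the
 * expected alloc/use/free lifecycle of struct tmpmasks around a cpumask or
 * partition update.
 */
static int example_with_tmpmasks(void)
{
	struct tmpmasks tmp;
	int err;

	err = alloc_tmpmasks(&tmp);
	if (err)
		return err;

	/* ... pass &tmp to the update helpers that need scratch masks ... */

	free_tmpmasks(&tmp);
	return 0;
}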
518
519
/**
520
* dup_or_alloc_cpuset - Duplicate or allocate a new cpuset
521
* @cs: Source cpuset to duplicate (NULL for a fresh allocation)
522
*
523
* Creates a new cpuset by either:
524
* 1. Duplicating an existing cpuset (if @cs is non-NULL), or
525
* 2. Allocating a fresh cpuset with zero-initialized masks (if @cs is NULL)
526
*
527
* Return: Pointer to newly allocated cpuset on success, NULL on failure
528
*/
529
static struct cpuset *dup_or_alloc_cpuset(struct cpuset *cs)
530
{
531
struct cpuset *trial;
532
533
/* Allocate base structure */
534
trial = cs ? kmemdup(cs, sizeof(*cs), GFP_KERNEL) :
535
kzalloc(sizeof(*cs), GFP_KERNEL);
536
if (!trial)
537
return NULL;
538
539
/* Setup cpumask pointer array */
540
cpumask_var_t *pmask[4] = {
541
&trial->cpus_allowed,
542
&trial->effective_cpus,
543
&trial->effective_xcpus,
544
&trial->exclusive_cpus
545
};
546
547
if (alloc_cpumasks(pmask, ARRAY_SIZE(pmask))) {
548
kfree(trial);
549
return NULL;
550
}
551
552
/* Copy masks if duplicating */
553
if (cs) {
554
cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
555
cpumask_copy(trial->effective_cpus, cs->effective_cpus);
556
cpumask_copy(trial->effective_xcpus, cs->effective_xcpus);
557
cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus);
558
}
559
560
return trial;
561
}
562
563
/**
564
* free_cpuset - free the cpuset
565
* @cs: the cpuset to be freed
566
*/
567
static inline void free_cpuset(struct cpuset *cs)
568
{
569
free_cpumask_var(cs->cpus_allowed);
570
free_cpumask_var(cs->effective_cpus);
571
free_cpumask_var(cs->effective_xcpus);
572
free_cpumask_var(cs->exclusive_cpus);
573
kfree(cs);
574
}
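/*
 * Illustrative sketch only (a hypothetical helper, not in this file): the
 * duplicate / modify / validate / free pattern that the setter paths later
 * in this file follow when testing a proposed change. validate_change() is
 * defined further down.
 */
static int example_try_change(struct cpuset *cs)
{
	struct cpuset *trial;
	int err;

	trial = dup_or_alloc_cpuset(cs);
	if (!trial)
		return -ENOMEM;

	/* ... apply the proposed cpumask or flag changes to trial ... */

	err = validate_change(cs, trial);
	free_cpuset(trial);
	return err;
}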
575
576
/* Return user specified exclusive CPUs */
577
static inline struct cpumask *user_xcpus(struct cpuset *cs)
578
{
579
return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed
580
: cs->exclusive_cpus;
581
}
582
583
static inline bool xcpus_empty(struct cpuset *cs)
584
{
585
return cpumask_empty(cs->cpus_allowed) &&
586
cpumask_empty(cs->exclusive_cpus);
587
}
588
589
/*
590
* cpusets_are_exclusive() - check if two cpusets are exclusive
591
*
592
* Return true if exclusive, false if not
593
*/
594
static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2)
595
{
596
struct cpumask *xcpus1 = user_xcpus(cs1);
597
struct cpumask *xcpus2 = user_xcpus(cs2);
598
599
if (cpumask_intersects(xcpus1, xcpus2))
600
return false;
601
return true;
602
}
603
604
/**
605
* cpus_excl_conflict - Check if two cpusets have exclusive CPU conflicts
606
* @cs1: first cpuset to check
607
* @cs2: second cpuset to check
608
*
609
* Returns: true if CPU exclusivity conflict exists, false otherwise
610
*
611
* Conflict detection rules:
612
* 1. If either cpuset is CPU exclusive, they must be mutually exclusive
613
* 2. exclusive_cpus masks cannot intersect between cpusets
614
* 3. The allowed CPUs of one cpuset cannot be a subset of another's exclusive CPUs
615
*/
616
static inline bool cpus_excl_conflict(struct cpuset *cs1, struct cpuset *cs2)
617
{
618
/* If either cpuset is exclusive, check if they are mutually exclusive */
619
if (is_cpu_exclusive(cs1) || is_cpu_exclusive(cs2))
620
return !cpusets_are_exclusive(cs1, cs2);
621
622
/* Exclusive_cpus cannot intersect */
623
if (cpumask_intersects(cs1->exclusive_cpus, cs2->exclusive_cpus))
624
return true;
625
626
/* The cpus_allowed of one cpuset cannot be a subset of another cpuset's exclusive_cpus */
627
if (!cpumask_empty(cs1->cpus_allowed) &&
628
cpumask_subset(cs1->cpus_allowed, cs2->exclusive_cpus))
629
return true;
630
631
if (!cpumask_empty(cs2->cpus_allowed) &&
632
cpumask_subset(cs2->cpus_allowed, cs1->exclusive_cpus))
633
return true;
634
635
return false;
636
}
637
638
static inline bool mems_excl_conflict(struct cpuset *cs1, struct cpuset *cs2)
639
{
640
if ((is_mem_exclusive(cs1) || is_mem_exclusive(cs2)))
641
return nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
642
return false;
643
}
644
645
/*
646
* validate_change() - Used to validate that any proposed cpuset change
647
* follows the structural rules for cpusets.
648
*
649
* If we replaced the flag and mask values of the current cpuset
650
* (cur) with those values in the trial cpuset (trial), would
651
* our various subset and exclusive rules still be valid? Presumes
652
* cpuset_mutex held.
653
*
654
* 'cur' is the address of an actual, in-use cpuset. Operations
655
* such as list traversal that depend on the actual address of the
656
* cpuset in the list must use cur below, not trial.
657
*
658
* 'trial' is the address of a bulk structure copy of cur, with
659
* perhaps one or more of the fields cpus_allowed, mems_allowed,
660
* or flags changed to new, trial values.
661
*
662
* Return 0 if valid, -errno if not.
663
*/
664
665
static int validate_change(struct cpuset *cur, struct cpuset *trial)
666
{
667
struct cgroup_subsys_state *css;
668
struct cpuset *c, *par;
669
int ret = 0;
670
671
rcu_read_lock();
672
673
if (!is_in_v2_mode())
674
ret = cpuset1_validate_change(cur, trial);
675
if (ret)
676
goto out;
677
678
/* Remaining checks don't apply to root cpuset */
679
if (cur == &top_cpuset)
680
goto out;
681
682
par = parent_cs(cur);
683
684
/*
685
* Cpusets with tasks - existing or newly being attached - can't
686
* be changed to have empty cpus_allowed or mems_allowed.
687
*/
688
ret = -ENOSPC;
689
if (cpuset_is_populated(cur)) {
690
if (!cpumask_empty(cur->cpus_allowed) &&
691
cpumask_empty(trial->cpus_allowed))
692
goto out;
693
if (!nodes_empty(cur->mems_allowed) &&
694
nodes_empty(trial->mems_allowed))
695
goto out;
696
}
697
698
/*
699
* We can't shrink if we won't have enough room for SCHED_DEADLINE
700
* tasks. This check is not done when scheduling is disabled as the
701
* users should know what they are doing.
702
*
703
* For v1, effective_cpus == cpus_allowed & user_xcpus() returns
704
* cpus_allowed.
705
*
706
* For v2, is_cpu_exclusive() & is_sched_load_balance() are true only
707
* for non-isolated partition root. At this point, the target
708
* effective_cpus isn't computed yet. user_xcpus() is the best
709
* approximation.
710
*
711
* TBD: May need to precompute the real effective_cpus here in case
712
* incorrect scheduling of SCHED_DEADLINE tasks in a partition
713
* becomes an issue.
714
*/
715
ret = -EBUSY;
716
if (is_cpu_exclusive(cur) && is_sched_load_balance(cur) &&
717
!cpuset_cpumask_can_shrink(cur->effective_cpus, user_xcpus(trial)))
718
goto out;
719
720
/*
721
* If either I or some sibling (!= me) is exclusive, we can't
722
* overlap. exclusive_cpus cannot overlap with each other if set.
723
*/
724
ret = -EINVAL;
725
cpuset_for_each_child(c, css, par) {
726
if (c == cur)
727
continue;
728
if (cpus_excl_conflict(trial, c))
729
goto out;
730
if (mems_excl_conflict(trial, c))
731
goto out;
732
}
733
734
ret = 0;
735
out:
736
rcu_read_unlock();
737
return ret;
738
}
739
740
#ifdef CONFIG_SMP
741
/*
742
* Helper routine for generate_sched_domains().
743
* Do cpusets a, b have overlapping effective cpus_allowed masks?
744
*/
745
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
746
{
747
return cpumask_intersects(a->effective_cpus, b->effective_cpus);
748
}
749
750
static void
751
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
752
{
753
if (dattr->relax_domain_level < c->relax_domain_level)
754
dattr->relax_domain_level = c->relax_domain_level;
755
return;
756
}
757
758
static void update_domain_attr_tree(struct sched_domain_attr *dattr,
759
struct cpuset *root_cs)
760
{
761
struct cpuset *cp;
762
struct cgroup_subsys_state *pos_css;
763
764
rcu_read_lock();
765
cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
766
/* skip the whole subtree if @cp doesn't have any CPU */
767
if (cpumask_empty(cp->cpus_allowed)) {
768
pos_css = css_rightmost_descendant(pos_css);
769
continue;
770
}
771
772
if (is_sched_load_balance(cp))
773
update_domain_attr(dattr, cp);
774
}
775
rcu_read_unlock();
776
}
777
778
/* Must be called with cpuset_mutex held. */
779
static inline int nr_cpusets(void)
780
{
781
/* jump label reference count + the top-level cpuset */
782
return static_key_count(&cpusets_enabled_key.key) + 1;
783
}
784
785
/*
786
* generate_sched_domains()
787
*
788
* This function builds a partial partition of the system's CPUs.
789
* A 'partial partition' is a set of non-overlapping subsets whose
790
* union is a subset of that set.
791
* The output of this function needs to be passed to kernel/sched/core.c
792
* partition_sched_domains() routine, which will rebuild the scheduler's
793
* load balancing domains (sched domains) as specified by that partial
794
* partition.
795
*
796
* See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
797
* for a background explanation of this.
798
*
799
* Does not return errors, on the theory that the callers of this
800
* routine would rather not worry about failures to rebuild sched
801
* domains when operating in the severe memory shortage situations
802
* that could cause allocation failures below.
803
*
804
* Must be called with cpuset_mutex held.
805
*
806
* The three key local variables below are:
807
* cp - cpuset pointer, used (together with pos_css) to perform a
808
* top-down scan of all cpusets. For our purposes, rebuilding
809
* the scheduler's sched domains, we can ignore !is_sched_load_
810
* balance cpusets.
811
* csa - (for CpuSet Array) Array of pointers to all the cpusets
812
* that need to be load balanced, for convenient iterative
813
* access by the subsequent code that finds the best partition,
814
* i.e. the set of domains (subsets) of CPUs such that the
815
* cpus_allowed of every cpuset marked is_sched_load_balance
816
* is a subset of one of these domains, while there are as
817
* many such domains as possible, each as small as possible.
818
* doms - Conversion of 'csa' to an array of cpumasks, for passing to
819
* the kernel/sched/core.c routine partition_sched_domains() in a
820
* convenient format, that can be easily compared to the prior
821
* value to determine what partition elements (sched domains)
822
* were changed (added or removed.)
823
*
824
* Finding the best partition (set of domains):
825
* The double nested loops below over i, j scan over the load
826
* balanced cpusets (using the array of cpuset pointers in csa[])
827
* looking for pairs of cpusets that have overlapping cpus_allowed
828
* and merging them using a union-find algorithm.
829
*
830
* The union of the cpus_allowed masks from the set of all cpusets
831
* having the same root then forms one element of the partition
832
* (one sched domain) to be passed to partition_sched_domains().
833
*
834
*/
835
static int generate_sched_domains(cpumask_var_t **domains,
836
struct sched_domain_attr **attributes)
837
{
838
struct cpuset *cp; /* top-down scan of cpusets */
839
struct cpuset **csa; /* array of all cpuset ptrs */
840
int csn; /* how many cpuset ptrs in csa so far */
841
int i, j; /* indices for partition finding loops */
842
cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
843
struct sched_domain_attr *dattr; /* attributes for custom domains */
844
int ndoms = 0; /* number of sched domains in result */
845
int nslot; /* next empty doms[] struct cpumask slot */
846
struct cgroup_subsys_state *pos_css;
847
bool root_load_balance = is_sched_load_balance(&top_cpuset);
848
bool cgrpv2 = cpuset_v2();
849
int nslot_update;
850
851
doms = NULL;
852
dattr = NULL;
853
csa = NULL;
854
855
/* Special case for the 99% of systems with one, full, sched domain */
856
if (root_load_balance && cpumask_empty(subpartitions_cpus)) {
857
single_root_domain:
858
ndoms = 1;
859
doms = alloc_sched_domains(ndoms);
860
if (!doms)
861
goto done;
862
863
dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
864
if (dattr) {
865
*dattr = SD_ATTR_INIT;
866
update_domain_attr_tree(dattr, &top_cpuset);
867
}
868
cpumask_and(doms[0], top_cpuset.effective_cpus,
869
housekeeping_cpumask(HK_TYPE_DOMAIN));
870
871
goto done;
872
}
873
874
csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
875
if (!csa)
876
goto done;
877
csn = 0;
878
879
rcu_read_lock();
880
if (root_load_balance)
881
csa[csn++] = &top_cpuset;
882
cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
883
if (cp == &top_cpuset)
884
continue;
885
886
if (cgrpv2)
887
goto v2;
888
889
/*
890
* v1:
891
* Continue traversing beyond @cp iff @cp has some CPUs and
892
* isn't load balancing. The former is obvious. The
893
* latter: All child cpusets contain a subset of the
894
* parent's cpus, so just skip them, and then we call
895
* update_domain_attr_tree() to calc relax_domain_level of
896
* the corresponding sched domain.
897
*/
898
if (!cpumask_empty(cp->cpus_allowed) &&
899
!(is_sched_load_balance(cp) &&
900
cpumask_intersects(cp->cpus_allowed,
901
housekeeping_cpumask(HK_TYPE_DOMAIN))))
902
continue;
903
904
if (is_sched_load_balance(cp) &&
905
!cpumask_empty(cp->effective_cpus))
906
csa[csn++] = cp;
907
908
/* skip @cp's subtree */
909
pos_css = css_rightmost_descendant(pos_css);
910
continue;
911
912
v2:
913
/*
914
* Only valid partition roots that are not isolated and with
915
* non-empty effective_cpus will be saved into csa[].
916
*/
917
if ((cp->partition_root_state == PRS_ROOT) &&
918
!cpumask_empty(cp->effective_cpus))
919
csa[csn++] = cp;
920
921
/*
922
* Skip @cp's subtree if not a partition root and has no
923
* exclusive CPUs to be granted to child cpusets.
924
*/
925
if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus))
926
pos_css = css_rightmost_descendant(pos_css);
927
}
928
rcu_read_unlock();
929
930
/*
931
* If there are only isolated partitions underneath the cgroup root,
932
* we can optimize out unneeded sched domains scanning.
933
*/
934
if (root_load_balance && (csn == 1))
935
goto single_root_domain;
936
937
for (i = 0; i < csn; i++)
938
uf_node_init(&csa[i]->node);
939
940
/* Merge overlapping cpusets */
941
for (i = 0; i < csn; i++) {
942
for (j = i + 1; j < csn; j++) {
943
if (cpusets_overlap(csa[i], csa[j])) {
944
/*
945
* Cgroup v2 shouldn't pass down overlapping
946
* partition root cpusets.
947
*/
948
WARN_ON_ONCE(cgrpv2);
949
uf_union(&csa[i]->node, &csa[j]->node);
950
}
951
}
952
}
953
954
/* Count the total number of domains */
955
for (i = 0; i < csn; i++) {
956
if (uf_find(&csa[i]->node) == &csa[i]->node)
957
ndoms++;
958
}
959
960
/*
961
* Now we know how many domains to create.
962
* Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
963
*/
964
doms = alloc_sched_domains(ndoms);
965
if (!doms)
966
goto done;
967
968
/*
969
* The rest of the code, including the scheduler, can deal with
970
* dattr==NULL case. No need to abort if alloc fails.
971
*/
972
dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
973
GFP_KERNEL);
974
975
/*
976
* Cgroup v2 doesn't support domain attributes, just set all of them
977
* to SD_ATTR_INIT. Also non-isolating partition root CPUs are a
978
* subset of HK_TYPE_DOMAIN housekeeping CPUs.
979
*/
980
if (cgrpv2) {
981
for (i = 0; i < ndoms; i++) {
982
/*
983
* The top cpuset may contain some boot time isolated
984
* CPUs that need to be excluded from the sched domain.
985
*/
986
if (csa[i] == &top_cpuset)
987
cpumask_and(doms[i], csa[i]->effective_cpus,
988
housekeeping_cpumask(HK_TYPE_DOMAIN));
989
else
990
cpumask_copy(doms[i], csa[i]->effective_cpus);
991
if (dattr)
992
dattr[i] = SD_ATTR_INIT;
993
}
994
goto done;
995
}
996
997
for (nslot = 0, i = 0; i < csn; i++) {
998
nslot_update = 0;
999
for (j = i; j < csn; j++) {
1000
if (uf_find(&csa[j]->node) == &csa[i]->node) {
1001
struct cpumask *dp = doms[nslot];
1002
1003
if (i == j) {
1004
nslot_update = 1;
1005
cpumask_clear(dp);
1006
if (dattr)
1007
*(dattr + nslot) = SD_ATTR_INIT;
1008
}
1009
cpumask_or(dp, dp, csa[j]->effective_cpus);
1010
cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
1011
if (dattr)
1012
update_domain_attr_tree(dattr + nslot, csa[j]);
1013
}
1014
}
1015
if (nslot_update)
1016
nslot++;
1017
}
1018
BUG_ON(nslot != ndoms);
1019
1020
done:
1021
kfree(csa);
1022
1023
/*
1024
* Fallback to the default domain if kmalloc() failed.
1025
* See comments in partition_sched_domains().
1026
*/
1027
if (doms == NULL)
1028
ndoms = 1;
1029
1030
*domains = doms;
1031
*attributes = dattr;
1032
return ndoms;
1033
}
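/*
 * Illustrative, self-contained userspace sketch (not kernel code) of the
 * merge step above: load-balanced cpusets whose effective CPUs overlap end
 * up in the same sched domain. Plain bitmasks stand in for cpumask_var_t,
 * and the naive root[] merge is a simplified stand-in for the kernel's
 * uf_union()/uf_find(); it is only adequate for this tiny example.
 */
#if 0	/* example only, never built as part of the kernel */
#include <stdio.h>

int main(void)
{
	/* effective CPUs: {0-3}, {2-5}, {8-11} */
	unsigned long eff[3] = { 0x00fUL, 0x03cUL, 0xf00UL };
	int root[3] = { 0, 1, 2 };
	int i, j, ndoms = 0;

	for (i = 0; i < 3; i++)
		for (j = i + 1; j < 3; j++)
			if (eff[i] & eff[j])	/* overlap => same domain */
				root[j] = root[i];

	for (i = 0; i < 3; i++)
		if (root[i] == i)
			ndoms++;

	printf("%d sched domains\n", ndoms);	/* prints "2 sched domains" */
	return 0;
}
#endif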
1034
1035
static void dl_update_tasks_root_domain(struct cpuset *cs)
1036
{
1037
struct css_task_iter it;
1038
struct task_struct *task;
1039
1040
if (cs->nr_deadline_tasks == 0)
1041
return;
1042
1043
css_task_iter_start(&cs->css, 0, &it);
1044
1045
while ((task = css_task_iter_next(&it)))
1046
dl_add_task_root_domain(task);
1047
1048
css_task_iter_end(&it);
1049
}
1050
1051
void dl_rebuild_rd_accounting(void)
1052
{
1053
struct cpuset *cs = NULL;
1054
struct cgroup_subsys_state *pos_css;
1055
int cpu;
1056
u64 cookie = ++dl_cookie;
1057
1058
lockdep_assert_held(&cpuset_mutex);
1059
lockdep_assert_cpus_held();
1060
lockdep_assert_held(&sched_domains_mutex);
1061
1062
rcu_read_lock();
1063
1064
for_each_possible_cpu(cpu) {
1065
if (dl_bw_visited(cpu, cookie))
1066
continue;
1067
1068
dl_clear_root_domain_cpu(cpu);
1069
}
1070
1071
cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1072
1073
if (cpumask_empty(cs->effective_cpus)) {
1074
pos_css = css_rightmost_descendant(pos_css);
1075
continue;
1076
}
1077
1078
css_get(&cs->css);
1079
1080
rcu_read_unlock();
1081
1082
dl_update_tasks_root_domain(cs);
1083
1084
rcu_read_lock();
1085
css_put(&cs->css);
1086
}
1087
rcu_read_unlock();
1088
}
1089
1090
/*
1091
* Rebuild scheduler domains.
1092
*
1093
* If the flag 'sched_load_balance' of any cpuset with non-empty
1094
* 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
1095
* which has that flag enabled, or if any cpuset with a non-empty
1096
* 'cpus' is removed, then call this routine to rebuild the
1097
* scheduler's dynamic sched domains.
1098
*
1099
* Call with cpuset_mutex held. Takes cpus_read_lock().
1100
*/
1101
void rebuild_sched_domains_locked(void)
1102
{
1103
struct cgroup_subsys_state *pos_css;
1104
struct sched_domain_attr *attr;
1105
cpumask_var_t *doms;
1106
struct cpuset *cs;
1107
int ndoms;
1108
1109
lockdep_assert_cpus_held();
1110
lockdep_assert_held(&cpuset_mutex);
1111
force_sd_rebuild = false;
1112
1113
/*
1114
* If we have raced with CPU hotplug, return early to avoid
1115
* passing doms with offlined cpu to partition_sched_domains().
1116
* Anyway, cpuset_handle_hotplug() will rebuild sched domains.
1117
*
1118
* With no CPUs in any subpartitions, top_cpuset's effective CPUs
1119
* should be the same as the active CPUs, so checking only top_cpuset
1120
* is enough to detect racing CPU offlines.
1121
*/
1122
if (cpumask_empty(subpartitions_cpus) &&
1123
!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
1124
return;
1125
1126
/*
1127
* With subpartition CPUs, however, the effective CPUs of a partition
1128
* root should be only a subset of the active CPUs. Since a CPU in any
1129
* partition root could be offlined, all must be checked.
1130
*/
1131
if (!cpumask_empty(subpartitions_cpus)) {
1132
rcu_read_lock();
1133
cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1134
if (!is_partition_valid(cs)) {
1135
pos_css = css_rightmost_descendant(pos_css);
1136
continue;
1137
}
1138
if (!cpumask_subset(cs->effective_cpus,
1139
cpu_active_mask)) {
1140
rcu_read_unlock();
1141
return;
1142
}
1143
}
1144
rcu_read_unlock();
1145
}
1146
1147
/* Generate domain masks and attrs */
1148
ndoms = generate_sched_domains(&doms, &attr);
1149
1150
/* Have scheduler rebuild the domains */
1151
partition_sched_domains(ndoms, doms, attr);
1152
}
1153
#else /* !CONFIG_SMP */
1154
void rebuild_sched_domains_locked(void)
1155
{
1156
}
1157
#endif /* CONFIG_SMP */
1158
1159
static void rebuild_sched_domains_cpuslocked(void)
1160
{
1161
mutex_lock(&cpuset_mutex);
1162
rebuild_sched_domains_locked();
1163
mutex_unlock(&cpuset_mutex);
1164
}
1165
1166
void rebuild_sched_domains(void)
1167
{
1168
cpus_read_lock();
1169
rebuild_sched_domains_cpuslocked();
1170
cpus_read_unlock();
1171
}
1172
1173
void cpuset_reset_sched_domains(void)
1174
{
1175
mutex_lock(&cpuset_mutex);
1176
partition_sched_domains(1, NULL, NULL);
1177
mutex_unlock(&cpuset_mutex);
1178
}
1179
1180
/**
1181
* cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1182
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1183
* @new_cpus: the temp variable for the new effective_cpus mask
1184
*
1185
* Iterate through each task of @cs updating its cpus_allowed to the
1186
* effective cpuset's. As this function is called with cpuset_mutex held,
1187
* cpuset membership stays stable.
1188
*
1189
* For top_cpuset, task_cpu_possible_mask() is used instead of effective_cpus
1190
* to make sure all offline CPUs are also included as hotplug code won't
1191
* update cpumasks for tasks in top_cpuset.
1192
*
1193
* As task_cpu_possible_mask() can be task dependent in arm64, we have to
1194
* do cpu masking per task instead of doing it once for all.
1195
*/
1196
void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1197
{
1198
struct css_task_iter it;
1199
struct task_struct *task;
1200
bool top_cs = cs == &top_cpuset;
1201
1202
css_task_iter_start(&cs->css, 0, &it);
1203
while ((task = css_task_iter_next(&it))) {
1204
const struct cpumask *possible_mask = task_cpu_possible_mask(task);
1205
1206
if (top_cs) {
1207
/*
1208
* PF_NO_SETAFFINITY tasks are ignored.
1209
* All per cpu kthreads should have PF_NO_SETAFFINITY
1210
* flag set, see kthread_set_per_cpu().
1211
*/
1212
if (task->flags & PF_NO_SETAFFINITY)
1213
continue;
1214
cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus);
1215
} else {
1216
cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
1217
}
1218
set_cpus_allowed_ptr(task, new_cpus);
1219
}
1220
css_task_iter_end(&it);
1221
}
1222
1223
/**
1224
* compute_effective_cpumask - Compute the effective cpumask of the cpuset
1225
* @new_cpus: the temp variable for the new effective_cpus mask
1226
* @cs: the cpuset that needs to recompute the new effective_cpus mask
1227
* @parent: the parent cpuset
1228
*
1229
* The result is valid only if the given cpuset isn't a partition root.
1230
*/
1231
static void compute_effective_cpumask(struct cpumask *new_cpus,
1232
struct cpuset *cs, struct cpuset *parent)
1233
{
1234
cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1235
}
1236
1237
/*
1238
* Commands for update_parent_effective_cpumask
1239
*/
1240
enum partition_cmd {
1241
partcmd_enable, /* Enable partition root */
1242
partcmd_enablei, /* Enable isolated partition root */
1243
partcmd_disable, /* Disable partition root */
1244
partcmd_update, /* Update parent's effective_cpus */
1245
partcmd_invalidate, /* Make partition invalid */
1246
};
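/*
 * Illustrative sketch only (an assumption, not the real parser in this
 * file): roughly how a requested partition_root_state could map onto the
 * commands above when a cpuset.cpus.partition write is handled by
 * update_prstate().
 */
static inline int example_prs_to_partcmd(int old_prs, int new_prs)
{
	if (new_prs == PRS_MEMBER)
		return partcmd_disable;
	if (old_prs == PRS_MEMBER)
		return (new_prs == PRS_ISOLATED) ? partcmd_enablei
						 : partcmd_enable;
	return partcmd_update;		/* e.g. switching root <-> isolated */
}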
1247
1248
static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1249
struct tmpmasks *tmp);
1250
1251
/*
1252
* Update partition exclusive flag
1253
*
1254
* Return: 0 if successful, an error code otherwise
1255
*/
1256
static int update_partition_exclusive_flag(struct cpuset *cs, int new_prs)
1257
{
1258
bool exclusive = (new_prs > PRS_MEMBER);
1259
1260
if (exclusive && !is_cpu_exclusive(cs)) {
1261
if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1))
1262
return PERR_NOTEXCL;
1263
} else if (!exclusive && is_cpu_exclusive(cs)) {
1264
/* Turning off CS_CPU_EXCLUSIVE will not return error */
1265
cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1266
}
1267
return 0;
1268
}
1269
1270
/*
1271
* Update partition load balance flag and/or rebuild sched domain
1272
*
1273
* Changing load balance flag will automatically call
1274
* rebuild_sched_domains_locked().
1275
* This function is for cgroup v2 only.
1276
*/
1277
static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
1278
{
1279
int new_prs = cs->partition_root_state;
1280
bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
1281
bool new_lb;
1282
1283
/*
1284
* If cs is not a valid partition root, the load balance state
1285
* will follow its parent.
1286
*/
1287
if (new_prs > 0) {
1288
new_lb = (new_prs != PRS_ISOLATED);
1289
} else {
1290
new_lb = is_sched_load_balance(parent_cs(cs));
1291
}
1292
if (new_lb != !!is_sched_load_balance(cs)) {
1293
rebuild_domains = true;
1294
if (new_lb)
1295
set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1296
else
1297
clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1298
}
1299
1300
if (rebuild_domains)
1301
cpuset_force_rebuild();
1302
}
1303
1304
/*
1305
* tasks_nocpu_error - Return true if tasks will have no effective_cpus
1306
*/
1307
static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs,
1308
struct cpumask *xcpus)
1309
{
1310
/*
1311
* A populated partition (cs or parent) can't have empty effective_cpus
1312
*/
1313
return (cpumask_subset(parent->effective_cpus, xcpus) &&
1314
partition_is_populated(parent, cs)) ||
1315
(!cpumask_intersects(xcpus, cpu_active_mask) &&
1316
partition_is_populated(cs, NULL));
1317
}
1318
1319
static void reset_partition_data(struct cpuset *cs)
1320
{
1321
struct cpuset *parent = parent_cs(cs);
1322
1323
if (!cpuset_v2())
1324
return;
1325
1326
lockdep_assert_held(&callback_lock);
1327
1328
if (cpumask_empty(cs->exclusive_cpus)) {
1329
cpumask_clear(cs->effective_xcpus);
1330
if (is_cpu_exclusive(cs))
1331
clear_bit(CS_CPU_EXCLUSIVE, &cs->flags);
1332
}
1333
if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed))
1334
cpumask_copy(cs->effective_cpus, parent->effective_cpus);
1335
}
1336
1337
/*
1338
* isolated_cpus_update - Update the isolated_cpus mask
1339
* @old_prs: old partition_root_state
1340
* @new_prs: new partition_root_state
1341
* @xcpus: exclusive CPUs with state change
1342
*/
1343
static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus)
1344
{
1345
WARN_ON_ONCE(old_prs == new_prs);
1346
if (new_prs == PRS_ISOLATED)
1347
cpumask_or(isolated_cpus, isolated_cpus, xcpus);
1348
else
1349
cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
1350
1351
isolated_cpus_updating = true;
1352
}
1353
1354
/*
1355
* partition_xcpus_add - Add new exclusive CPUs to partition
1356
* @new_prs: new partition_root_state
1357
* @parent: parent cpuset
1358
* @xcpus: exclusive CPUs to be added
1359
*
1360
* Remote partition if parent == NULL
1361
*/
1362
static void partition_xcpus_add(int new_prs, struct cpuset *parent,
1363
struct cpumask *xcpus)
1364
{
1365
WARN_ON_ONCE(new_prs < 0);
1366
lockdep_assert_held(&callback_lock);
1367
if (!parent)
1368
parent = &top_cpuset;
1369
1370
1371
if (parent == &top_cpuset)
1372
cpumask_or(subpartitions_cpus, subpartitions_cpus, xcpus);
1373
1374
if (new_prs != parent->partition_root_state)
1375
isolated_cpus_update(parent->partition_root_state, new_prs,
1376
xcpus);
1377
1378
cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus);
1379
}
1380
1381
/*
1382
* partition_xcpus_del - Remove exclusive CPUs from partition
1383
* @old_prs: old partition_root_state
1384
* @parent: parent cpuset
1385
* @xcpus: exclusive CPUs to be removed
1386
*
1387
* Remote partition if parent == NULL
1388
*/
1389
static void partition_xcpus_del(int old_prs, struct cpuset *parent,
1390
struct cpumask *xcpus)
1391
{
1392
WARN_ON_ONCE(old_prs < 0);
1393
lockdep_assert_held(&callback_lock);
1394
if (!parent)
1395
parent = &top_cpuset;
1396
1397
if (parent == &top_cpuset)
1398
cpumask_andnot(subpartitions_cpus, subpartitions_cpus, xcpus);
1399
1400
if (old_prs != parent->partition_root_state)
1401
isolated_cpus_update(old_prs, parent->partition_root_state,
1402
xcpus);
1403
1404
cpumask_and(xcpus, xcpus, cpu_active_mask);
1405
cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus);
1406
}
1407
1408
/*
1409
* isolated_cpus_can_update - check for isolated & nohz_full conflicts
1410
* @add_cpus: cpu mask for cpus that are going to be isolated
1411
* @del_cpus: cpu mask for cpus that are no longer isolated, can be NULL
1412
* Return: false if there is conflict, true otherwise
1413
*
1414
* If nohz_full is enabled and we have isolated CPUs, their combination must
1415
* still leave housekeeping CPUs.
1416
*
1417
* TBD: Should consider merging this function into
1418
* prstate_housekeeping_conflict().
1419
*/
1420
static bool isolated_cpus_can_update(struct cpumask *add_cpus,
1421
struct cpumask *del_cpus)
1422
{
1423
cpumask_var_t full_hk_cpus;
1424
int res = true;
1425
1426
if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
1427
return true;
1428
1429
if (del_cpus && cpumask_weight_and(del_cpus,
1430
housekeeping_cpumask(HK_TYPE_KERNEL_NOISE)))
1431
return true;
1432
1433
if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL))
1434
return false;
1435
1436
cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
1437
housekeeping_cpumask(HK_TYPE_DOMAIN));
1438
cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
1439
cpumask_and(full_hk_cpus, full_hk_cpus, cpu_active_mask);
1440
if (!cpumask_weight_andnot(full_hk_cpus, add_cpus))
1441
res = false;
1442
1443
free_cpumask_var(full_hk_cpus);
1444
return res;
1445
}
1446
1447
/*
1448
* prstate_housekeeping_conflict - check for partition & housekeeping conflicts
1449
* @prstate: partition root state to be checked
1450
* @new_cpus: cpu mask
1451
* Return: true if there is conflict, false otherwise
1452
*
1453
* CPUs outside of boot_hk_cpus, if defined, can only be used in an
1454
* isolated partition.
1455
*/
1456
static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
1457
{
1458
if (!have_boot_isolcpus)
1459
return false;
1460
1461
if ((prstate != PRS_ISOLATED) && !cpumask_subset(new_cpus, boot_hk_cpus))
1462
return true;
1463
1464
return false;
1465
}
1466
1467
/*
1468
* update_isolation_cpumasks - Update external isolation related CPU masks
1469
*
1470
* The following external CPU masks will be updated if necessary:
1471
* - workqueue unbound cpumask
1472
*/
1473
static void update_isolation_cpumasks(void)
1474
{
1475
int ret;
1476
1477
if (!isolated_cpus_updating)
1478
return;
1479
1480
lockdep_assert_cpus_held();
1481
1482
ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
1483
WARN_ON_ONCE(ret < 0);
1484
1485
ret = tmigr_isolated_exclude_cpumask(isolated_cpus);
1486
WARN_ON_ONCE(ret < 0);
1487
1488
isolated_cpus_updating = false;
1489
}
1490
1491
/**
1492
* cpuset_cpu_is_isolated - Check if the given CPU is isolated
1493
* @cpu: the CPU number to be checked
1494
* Return: true if CPU is used in an isolated partition, false otherwise
1495
*/
1496
bool cpuset_cpu_is_isolated(int cpu)
1497
{
1498
return cpumask_test_cpu(cpu, isolated_cpus);
1499
}
1500
EXPORT_SYMBOL_GPL(cpuset_cpu_is_isolated);
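/*
 * Illustrative sketch only (an assumed external caller, not in this file):
 * a subsystem that wants to stay off CPUs sitting in isolated partitions
 * can test each CPU with the exported helper above.
 */
static void example_skip_isolated_cpus(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpuset_cpu_is_isolated(cpu))
			continue;
		/* ... queue housekeeping-style work on @cpu ... */
	}
}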
1501
1502
/**
1503
* rm_siblings_excl_cpus - Remove exclusive CPUs that are used by sibling cpusets
1504
* @parent: Parent cpuset containing all siblings
1505
* @cs: Current cpuset (will be skipped)
1506
* @excpus: exclusive effective CPU mask to modify
1507
*
1508
* This function ensures the given @excpus mask doesn't include any CPUs that
1509
* are exclusively allocated to sibling cpusets. It walks through all siblings
1510
* of @cs under @parent and removes their exclusive CPUs from @excpus.
1511
*/
1512
static int rm_siblings_excl_cpus(struct cpuset *parent, struct cpuset *cs,
1513
struct cpumask *excpus)
1514
{
1515
struct cgroup_subsys_state *css;
1516
struct cpuset *sibling;
1517
int retval = 0;
1518
1519
if (cpumask_empty(excpus))
1520
return retval;
1521
1522
/*
1523
* Exclude exclusive CPUs from siblings
1524
*/
1525
rcu_read_lock();
1526
cpuset_for_each_child(sibling, css, parent) {
1527
if (sibling == cs)
1528
continue;
1529
1530
if (cpumask_intersects(excpus, sibling->exclusive_cpus)) {
1531
cpumask_andnot(excpus, excpus, sibling->exclusive_cpus);
1532
retval++;
1533
continue;
1534
}
1535
if (cpumask_intersects(excpus, sibling->effective_xcpus)) {
1536
cpumask_andnot(excpus, excpus, sibling->effective_xcpus);
1537
retval++;
1538
}
1539
}
1540
rcu_read_unlock();
1541
1542
return retval;
1543
}
1544
1545
/*
1546
* compute_excpus - compute effective exclusive CPUs
1547
* @cs: cpuset
1548
* @xcpus: effective exclusive CPUs value to be set
1549
* Return: 0 if there is no sibling conflict, > 0 otherwise
1550
*
1551
* If exclusive_cpus isn't explicitly set, we have to scan the sibling cpusets
1552
* and exclude their exclusive_cpus or effective_xcpus as well.
1553
*/
1554
static int compute_excpus(struct cpuset *cs, struct cpumask *excpus)
1555
{
1556
struct cpuset *parent = parent_cs(cs);
1557
1558
cpumask_and(excpus, user_xcpus(cs), parent->effective_xcpus);
1559
1560
if (!cpumask_empty(cs->exclusive_cpus))
1561
return 0;
1562
1563
return rm_siblings_excl_cpus(parent, cs, excpus);
1564
}
1565
1566
/*
1567
* compute_trialcs_excpus - Compute effective exclusive CPUs for a trial cpuset
1568
* @trialcs: The trial cpuset containing the proposed new configuration
1569
* @cs: The original cpuset that the trial configuration is based on
1570
* Return: 0 if successful with no sibling conflict, >0 if a conflict is found
1571
*
1572
* Computes the effective_xcpus for a trial configuration. @cs is provided to represent
1573
* the real cpuset.
1574
*/
1575
static int compute_trialcs_excpus(struct cpuset *trialcs, struct cpuset *cs)
1576
{
1577
struct cpuset *parent = parent_cs(trialcs);
1578
struct cpumask *excpus = trialcs->effective_xcpus;
1579
1580
/* trialcs is member, cpuset.cpus has no impact to excpus */
1581
if (cs_is_member(cs))
1582
cpumask_and(excpus, trialcs->exclusive_cpus,
1583
parent->effective_xcpus);
1584
else
1585
cpumask_and(excpus, user_xcpus(trialcs), parent->effective_xcpus);
1586
1587
return rm_siblings_excl_cpus(parent, cs, excpus);
1588
}
1589
1590
static inline bool is_remote_partition(struct cpuset *cs)
1591
{
1592
return cs->remote_partition;
1593
}
1594
1595
static inline bool is_local_partition(struct cpuset *cs)
1596
{
1597
return is_partition_valid(cs) && !is_remote_partition(cs);
1598
}
1599
1600
/*
1601
* remote_partition_enable - Enable current cpuset as a remote partition root
1602
* @cs: the cpuset to update
1603
* @new_prs: new partition_root_state
1604
* @tmp: temporary masks
1605
* Return: 0 if successful, errcode if error
1606
*
1607
* Enable the current cpuset to become a remote partition root taking CPUs
1608
* directly from the top cpuset. cpuset_mutex must be held by the caller.
1609
*/
1610
static int remote_partition_enable(struct cpuset *cs, int new_prs,
1611
struct tmpmasks *tmp)
1612
{
1613
/*
1614
* The user must have sysadmin privilege.
1615
*/
1616
if (!capable(CAP_SYS_ADMIN))
1617
return PERR_ACCESS;
1618
1619
/*
1620
* The requested exclusive_cpus must not be allocated to other
1621
* partitions and it can't use up all the root's effective_cpus.
1622
*
1623
* The effective_xcpus mask can contain offline CPUs, but there must
1624
* be at least one online CPU present before it can be enabled.
1625
*
1626
* Note that creating a remote partition with any local partition root
1627
* above it or remote partition root underneath it is not allowed.
1628
*/
1629
compute_excpus(cs, tmp->new_cpus);
1630
WARN_ON_ONCE(cpumask_intersects(tmp->new_cpus, subpartitions_cpus));
1631
if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) ||
1632
cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
1633
return PERR_INVCPUS;
1634
if (((new_prs == PRS_ISOLATED) &&
1635
!isolated_cpus_can_update(tmp->new_cpus, NULL)) ||
1636
prstate_housekeeping_conflict(new_prs, tmp->new_cpus))
1637
return PERR_HKEEPING;
1638
1639
spin_lock_irq(&callback_lock);
1640
partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
1641
cs->remote_partition = true;
1642
cpumask_copy(cs->effective_xcpus, tmp->new_cpus);
1643
spin_unlock_irq(&callback_lock);
1644
update_isolation_cpumasks();
1645
cpuset_force_rebuild();
1646
cs->prs_err = 0;
1647
1648
/*
1649
* Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1650
*/
1651
cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1652
update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1653
return 0;
1654
}
1655
1656
/*
1657
* remote_partition_disable - Remove current cpuset from remote partition list
1658
* @cs: the cpuset to update
1659
* @tmp: temporary masks
1660
*
1661
* The effective_cpus is also updated.
1662
*
1663
* cpuset_mutex must be held by the caller.
1664
*/
1665
static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
1666
{
1667
WARN_ON_ONCE(!is_remote_partition(cs));
1668
/*
1669
* When a CPU is offlined, top_cpuset may end up with no available CPUs,
1670
* which should clear subpartitions_cpus. We should not emit a warning for this
1671
* scenario: the hierarchy is updated from top to bottom, so subpartitions_cpus
1672
* may already be cleared when disabling the partition.
1673
*/
1674
WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus) &&
1675
!cpumask_empty(subpartitions_cpus));
1676
1677
spin_lock_irq(&callback_lock);
1678
cs->remote_partition = false;
1679
partition_xcpus_del(cs->partition_root_state, NULL, cs->effective_xcpus);
1680
if (cs->prs_err)
1681
cs->partition_root_state = -cs->partition_root_state;
1682
else
1683
cs->partition_root_state = PRS_MEMBER;
1684
1685
/* effective_xcpus may need to be changed */
1686
compute_excpus(cs, cs->effective_xcpus);
1687
reset_partition_data(cs);
1688
spin_unlock_irq(&callback_lock);
1689
update_isolation_cpumasks();
1690
cpuset_force_rebuild();
1691
1692
/*
1693
* Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1694
*/
1695
cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1696
update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1697
}
1698
1699
/*
1700
* remote_cpus_update - cpus_exclusive change of remote partition
1701
* @cs: the cpuset to be updated
1702
* @xcpus: the new exclusive_cpus mask, if non-NULL
1703
* @excpus: the new effective_xcpus mask
1704
* @tmp: temporary masks
1705
*
1706
* top_cpuset and subpartitions_cpus will be updated or partition can be
1707
* invalidated.
1708
*/
1709
static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
1710
struct cpumask *excpus, struct tmpmasks *tmp)
1711
{
1712
bool adding, deleting;
1713
int prs = cs->partition_root_state;
1714
1715
if (WARN_ON_ONCE(!is_remote_partition(cs)))
1716
return;
1717
1718
WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
1719
1720
if (cpumask_empty(excpus)) {
1721
cs->prs_err = PERR_CPUSEMPTY;
1722
goto invalidate;
1723
}
1724
1725
adding = cpumask_andnot(tmp->addmask, excpus, cs->effective_xcpus);
1726
deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, excpus);
1727
1728
/*
1729
* Addition of remote CPUs is only allowed if those CPUs are
1730
* not allocated to other partitions and there are effective_cpus
1731
* left in the top cpuset.
1732
*/
1733
if (adding) {
1734
WARN_ON_ONCE(cpumask_intersects(tmp->addmask, subpartitions_cpus));
1735
if (!capable(CAP_SYS_ADMIN))
1736
cs->prs_err = PERR_ACCESS;
1737
else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
1738
cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
1739
cs->prs_err = PERR_NOCPUS;
1740
else if ((prs == PRS_ISOLATED) &&
1741
!isolated_cpus_can_update(tmp->addmask, tmp->delmask))
1742
cs->prs_err = PERR_HKEEPING;
1743
if (cs->prs_err)
1744
goto invalidate;
1745
}
1746
1747
spin_lock_irq(&callback_lock);
1748
if (adding)
1749
partition_xcpus_add(prs, NULL, tmp->addmask);
1750
if (deleting)
1751
partition_xcpus_del(prs, NULL, tmp->delmask);
1752
/*
1753
* Need to update effective_xcpus and exclusive_cpus now as
1754
* update_sibling_cpumasks() below may iterate back to the same cs.
1755
*/
1756
cpumask_copy(cs->effective_xcpus, excpus);
1757
if (xcpus)
1758
cpumask_copy(cs->exclusive_cpus, xcpus);
1759
spin_unlock_irq(&callback_lock);
1760
update_isolation_cpumasks();
1761
if (adding || deleting)
1762
cpuset_force_rebuild();
1763
1764
/*
1765
* Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1766
*/
1767
cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1768
update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1769
return;
1770
1771
invalidate:
1772
remote_partition_disable(cs, tmp);
1773
}
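/*
* Illustrative example (editor's sketch with hypothetical values, not part
* of the original source): if a remote partition currently owns
* effective_xcpus = 8-11 and remote_cpus_update() is called with
* excpus = 10-13, the masks computed in the function are
*
* tmp->addmask = (10-13) & ~(8-11) = 12-13 (new CPUs to claim)
* tmp->delmask = (8-11) & ~(10-13) = 8-9 (CPUs to give back)
*
* and, since something changed, cpuset_force_rebuild() requests a sched
* domain rebuild.
*/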
1774
1775
/**
* update_parent_effective_cpumask - update effective_cpus mask of parent cpuset
* @cs: The cpuset that requests change in partition root state
* @cmd: Partition root state change command
* @newmask: Optional new cpumask for partcmd_update
* @tmp: Temporary addmask and delmask
* Return: 0 or a partition root state error code
*
* For partcmd_enable*, the cpuset is being transformed from a non-partition
* root to a partition root. The effective_xcpus (cpus_allowed if
* effective_xcpus not set) mask of the given cpuset will be taken away from
* parent's effective_cpus. The function returns 0 if all the CPUs listed
* in effective_xcpus can be granted; otherwise an error code is returned.
*
* For partcmd_disable, the cpuset is being transformed from a partition
* root back to a non-partition root. Any CPUs in effective_xcpus will be
* given back to parent's effective_cpus. 0 will always be returned.
*
* For partcmd_update, if the optional newmask is specified, the cpu list is
* to be changed from effective_xcpus to newmask. Otherwise, effective_xcpus is
* assumed to remain the same. The cpuset should be either a valid or an
* invalid partition root. The partition root state may change from valid to
* invalid or vice versa. An error code will be returned if transitioning from
* invalid to valid violates the exclusivity rule.
*
* For partcmd_invalidate, the current partition will be made invalid.
*
* The partcmd_enable* and partcmd_disable commands are used by
* update_prstate(). An error code may be returned and the caller will check
* for error.
*
* The partcmd_update command is used by update_cpumasks_hier() with newmask
* NULL and by update_cpumask() with newmask set. The partcmd_invalidate is
* used by update_cpumask() with NULL newmask. In both cases, the callers won't
* check for error, so partition_root_state and prs_err will be updated
* directly.
*/
1812
static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
1813
struct cpumask *newmask,
1814
struct tmpmasks *tmp)
1815
{
1816
struct cpuset *parent = parent_cs(cs);
1817
int adding; /* Adding cpus to parent's effective_cpus */
1818
int deleting; /* Deleting cpus from parent's effective_cpus */
1819
int old_prs, new_prs;
1820
int part_error = PERR_NONE; /* Partition error? */
1821
struct cpumask *xcpus = user_xcpus(cs);
1822
int parent_prs = parent->partition_root_state;
1823
bool nocpu;
1824
1825
lockdep_assert_held(&cpuset_mutex);
1826
WARN_ON_ONCE(is_remote_partition(cs)); /* For local partition only */
1827
1828
/*
1829
* new_prs will only be changed for the partcmd_update and
1830
* partcmd_invalidate commands.
1831
*/
1832
adding = deleting = false;
1833
old_prs = new_prs = cs->partition_root_state;
1834
1835
if (cmd == partcmd_invalidate) {
1836
if (is_partition_invalid(cs))
1837
return 0;
1838
1839
/*
1840
* Make the current partition invalid.
1841
*/
1842
if (is_partition_valid(parent))
1843
adding = cpumask_and(tmp->addmask,
1844
xcpus, parent->effective_xcpus);
1845
if (old_prs > 0)
1846
new_prs = -old_prs;
1847
1848
goto write_error;
1849
}
1850
1851
/*
1852
* The parent must be a partition root.
1853
* The new cpumask, if present, or the current cpus_allowed must
1854
* not be empty.
1855
*/
1856
if (!is_partition_valid(parent)) {
1857
return is_partition_invalid(parent)
1858
? PERR_INVPARENT : PERR_NOTPART;
1859
}
1860
if (!newmask && xcpus_empty(cs))
1861
return PERR_CPUSEMPTY;
1862
1863
nocpu = tasks_nocpu_error(parent, cs, xcpus);
1864
1865
if ((cmd == partcmd_enable) || (cmd == partcmd_enablei)) {
1866
/*
1867
* Need to call compute_excpus() in case
1868
* exclusive_cpus not set. Sibling conflict should only happen
1869
* if exclusive_cpus isn't set.
1870
*/
1871
xcpus = tmp->delmask;
1872
if (compute_excpus(cs, xcpus))
1873
WARN_ON_ONCE(!cpumask_empty(cs->exclusive_cpus));
1874
new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
1875
1876
/*
1877
* Enabling partition root is not allowed if its
1878
* effective_xcpus is empty.
1879
*/
1880
if (cpumask_empty(xcpus))
1881
return PERR_INVCPUS;
1882
1883
if (prstate_housekeeping_conflict(new_prs, xcpus))
1884
return PERR_HKEEPING;
1885
1886
if ((new_prs == PRS_ISOLATED) && (new_prs != parent_prs) &&
1887
!isolated_cpus_can_update(xcpus, NULL))
1888
return PERR_HKEEPING;
1889
1890
if (tasks_nocpu_error(parent, cs, xcpus))
1891
return PERR_NOCPUS;
1892
1893
/*
1894
* This function will only be called when all the preliminary
1895
* checks have passed. At this point, the following condition
1896
* should hold.
1897
*
1898
* (cs->effective_xcpus & cpu_active_mask) ⊆ parent->effective_cpus
1899
*
1900
* Warn if it is not the case.
1901
*/
1902
cpumask_and(tmp->new_cpus, xcpus, cpu_active_mask);
1903
WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
1904
1905
deleting = true;
1906
} else if (cmd == partcmd_disable) {
1907
/*
1908
* May need to add cpus back to parent's effective_cpus
1909
* (and maybe removed from subpartitions_cpus/isolated_cpus)
1910
* for valid partition root. xcpus may contain CPUs that
1911
* shouldn't be removed from the two global cpumasks.
1912
*/
1913
if (is_partition_valid(cs)) {
1914
cpumask_copy(tmp->addmask, cs->effective_xcpus);
1915
adding = true;
1916
}
1917
new_prs = PRS_MEMBER;
1918
} else if (newmask) {
1919
/*
1920
* Empty cpumask is not allowed
1921
*/
1922
if (cpumask_empty(newmask)) {
1923
part_error = PERR_CPUSEMPTY;
1924
goto write_error;
1925
}
1926
1927
/* Check newmask again, whether cpus are available for parent/cs */
1928
nocpu |= tasks_nocpu_error(parent, cs, newmask);
1929
1930
/*
* partcmd_update with newmask:
*
* Compute add/delete mask to/from effective_cpus
*
* For valid partition:
* addmask = exclusive_cpus & ~newmask
* & parent->effective_xcpus
* delmask = newmask & ~exclusive_cpus
* & parent->effective_xcpus
*
* For invalid partition:
* delmask = newmask & parent->effective_xcpus
* The partition may become valid soon.
*/
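/*
* Worked example (editor's sketch with hypothetical values, not part of
* the original source): for a valid partition with
*
* exclusive_cpus = 0-7
* newmask = 4-11
* parent->effective_xcpus = 0-15
*
* the formulas above give
*
* addmask = (0-7) & ~(4-11) & (0-15) = 0-3 (returned to the parent)
* delmask = (4-11) & ~(0-7) & (0-15) = 8-11 (claimed from the parent)
*/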
1945
if (is_partition_invalid(cs)) {
1946
adding = false;
1947
deleting = cpumask_and(tmp->delmask,
1948
newmask, parent->effective_xcpus);
1949
} else {
1950
cpumask_andnot(tmp->addmask, xcpus, newmask);
1951
adding = cpumask_and(tmp->addmask, tmp->addmask,
1952
parent->effective_xcpus);
1953
1954
cpumask_andnot(tmp->delmask, newmask, xcpus);
1955
deleting = cpumask_and(tmp->delmask, tmp->delmask,
1956
parent->effective_xcpus);
1957
}
1958
1959
/*
* TBD: Invalidating a currently valid child root partition may
* still break the isolated_cpus_can_update() rule if the parent is
* an isolated partition.
*/
1964
if (is_partition_valid(cs) && (old_prs != parent_prs)) {
1965
if ((parent_prs == PRS_ROOT) &&
1966
/* Adding to parent means removing isolated CPUs */
1967
!isolated_cpus_can_update(tmp->delmask, tmp->addmask))
1968
part_error = PERR_HKEEPING;
1969
if ((parent_prs == PRS_ISOLATED) &&
1970
/* Adding to parent means adding isolated CPUs */
1971
!isolated_cpus_can_update(tmp->addmask, tmp->delmask))
1972
part_error = PERR_HKEEPING;
1973
}
1974
1975
/*
1976
* The new CPUs to be removed from parent's effective CPUs
1977
* must be present.
1978
*/
1979
if (deleting) {
1980
cpumask_and(tmp->new_cpus, tmp->delmask, cpu_active_mask);
1981
WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
1982
}
1983
1984
/*
1985
* Make partition invalid if parent's effective_cpus could
1986
* become empty and there are tasks in the parent.
1987
*/
1988
if (nocpu && (!adding ||
1989
!cpumask_intersects(tmp->addmask, cpu_active_mask))) {
1990
part_error = PERR_NOCPUS;
1991
deleting = false;
1992
adding = cpumask_and(tmp->addmask,
1993
xcpus, parent->effective_xcpus);
1994
}
1995
} else {
1996
/*
1997
* partcmd_update w/o newmask
1998
*
1999
* delmask = effective_xcpus & parent->effective_cpus
2000
*
2001
* This can be called from:
2002
* 1) update_cpumasks_hier()
2003
* 2) cpuset_hotplug_update_tasks()
2004
*
2005
* Check to see if it can be transitioned from valid to
2006
* invalid partition or vice versa.
2007
*
2008
* A partition error happens when parent has tasks and all
2009
* its effective CPUs will have to be distributed out.
2010
*/
2011
if (nocpu) {
2012
part_error = PERR_NOCPUS;
2013
if (is_partition_valid(cs))
2014
adding = cpumask_and(tmp->addmask,
2015
xcpus, parent->effective_xcpus);
2016
} else if (is_partition_invalid(cs) && !cpumask_empty(xcpus) &&
2017
cpumask_subset(xcpus, parent->effective_xcpus)) {
2018
struct cgroup_subsys_state *css;
2019
struct cpuset *child;
2020
bool exclusive = true;
2021
2022
/*
* Converting an invalid partition to a valid one has to
* pass the cpu exclusivity test.
*/
2026
rcu_read_lock();
2027
cpuset_for_each_child(child, css, parent) {
2028
if (child == cs)
2029
continue;
2030
if (!cpusets_are_exclusive(cs, child)) {
2031
exclusive = false;
2032
break;
2033
}
2034
}
2035
rcu_read_unlock();
2036
if (exclusive)
2037
deleting = cpumask_and(tmp->delmask,
2038
xcpus, parent->effective_cpus);
2039
else
2040
part_error = PERR_NOTEXCL;
2041
}
2042
}
2043
2044
write_error:
2045
if (part_error)
2046
WRITE_ONCE(cs->prs_err, part_error);
2047
2048
if (cmd == partcmd_update) {
2049
/*
2050
* Check for possible transition between valid and invalid
2051
* partition root.
2052
*/
2053
switch (cs->partition_root_state) {
2054
case PRS_ROOT:
2055
case PRS_ISOLATED:
2056
if (part_error)
2057
new_prs = -old_prs;
2058
break;
2059
case PRS_INVALID_ROOT:
2060
case PRS_INVALID_ISOLATED:
2061
if (!part_error)
2062
new_prs = -old_prs;
2063
break;
2064
}
2065
}
2066
2067
if (!adding && !deleting && (new_prs == old_prs))
2068
return 0;
2069
2070
/*
* Transitioning from invalid to valid or vice versa may require
* changing CS_CPU_EXCLUSIVE. In the case of partcmd_update,
* validate_change() has already been successfully called and
* CPU lists in cs haven't been updated yet. So defer it to later.
*/
2076
if ((old_prs != new_prs) && (cmd != partcmd_update)) {
2077
int err = update_partition_exclusive_flag(cs, new_prs);
2078
2079
if (err)
2080
return err;
2081
}
2082
2083
/*
2084
* Change the parent's effective_cpus & effective_xcpus (top cpuset
2085
* only).
2086
*
2087
* Newly added CPUs will be removed from effective_cpus and
2088
* newly deleted ones will be added back to effective_cpus.
2089
*/
2090
spin_lock_irq(&callback_lock);
2091
if (old_prs != new_prs)
2092
cs->partition_root_state = new_prs;
2093
2094
/*
* Adding to parent's effective_cpus means deleting CPUs from cs
* and vice versa.
*/
2098
if (adding)
2099
partition_xcpus_del(old_prs, parent, tmp->addmask);
2100
if (deleting)
2101
partition_xcpus_add(new_prs, parent, tmp->delmask);
2102
2103
spin_unlock_irq(&callback_lock);
2104
update_isolation_cpumasks();
2105
2106
if ((old_prs != new_prs) && (cmd == partcmd_update))
2107
update_partition_exclusive_flag(cs, new_prs);
2108
2109
if (adding || deleting) {
2110
cpuset_update_tasks_cpumask(parent, tmp->addmask);
2111
update_sibling_cpumasks(parent, cs, tmp);
2112
}
2113
2114
/*
2115
* For partcmd_update without newmask, it is being called from
2116
* cpuset_handle_hotplug(). Update the load balance flag and
2117
* scheduling domain accordingly.
2118
*/
2119
if ((cmd == partcmd_update) && !newmask)
2120
update_partition_sd_lb(cs, old_prs);
2121
2122
notify_partition_change(cs, old_prs);
2123
return 0;
2124
}
2125
2126
/**
2127
* compute_partition_effective_cpumask - compute effective_cpus for partition
2128
* @cs: partition root cpuset
2129
* @new_ecpus: previously computed effective_cpus to be updated
2130
*
2131
* Compute the effective_cpus of a partition root by scanning effective_xcpus
2132
* of child partition roots and excluding their effective_xcpus.
2133
*
2134
* This has the side effect of invalidating valid child partition roots,
2135
* if necessary. Since it is called from either cpuset_hotplug_update_tasks()
2136
* or update_cpumasks_hier() where parent and children are modified
2137
* successively, we don't need to call update_parent_effective_cpumask()
2138
* and the child's effective_cpus will be updated in later iterations.
2139
*
2140
* Note that rcu_read_lock() is assumed to be held.
2141
*/
2142
static void compute_partition_effective_cpumask(struct cpuset *cs,
2143
struct cpumask *new_ecpus)
2144
{
2145
struct cgroup_subsys_state *css;
2146
struct cpuset *child;
2147
bool populated = partition_is_populated(cs, NULL);
2148
2149
/*
* Check child partition roots to see if they should be
* invalidated when
* 1) child effective_xcpus not a subset of new
* exclusive_cpus
* 2) all the effective_cpus will be used up and cs
* has tasks
*/
2157
compute_excpus(cs, new_ecpus);
2158
cpumask_and(new_ecpus, new_ecpus, cpu_active_mask);
2159
2160
rcu_read_lock();
2161
cpuset_for_each_child(child, css, cs) {
2162
if (!is_partition_valid(child))
2163
continue;
2164
2165
/*
2166
* There shouldn't be a remote partition underneath another
2167
* partition root.
2168
*/
2169
WARN_ON_ONCE(is_remote_partition(child));
2170
child->prs_err = 0;
2171
if (!cpumask_subset(child->effective_xcpus,
2172
cs->effective_xcpus))
2173
child->prs_err = PERR_INVCPUS;
2174
else if (populated &&
2175
cpumask_subset(new_ecpus, child->effective_xcpus))
2176
child->prs_err = PERR_NOCPUS;
2177
2178
if (child->prs_err) {
2179
int old_prs = child->partition_root_state;
2180
2181
/*
2182
* Invalidate child partition
2183
*/
2184
spin_lock_irq(&callback_lock);
2185
make_partition_invalid(child);
2186
spin_unlock_irq(&callback_lock);
2187
notify_partition_change(child, old_prs);
2188
continue;
2189
}
2190
cpumask_andnot(new_ecpus, new_ecpus,
2191
child->effective_xcpus);
2192
}
2193
rcu_read_unlock();
2194
}
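/*
* Illustrative example (editor's note, hypothetical values): if @cs has
* effective_xcpus = 0-7, all of them online, and one valid child partition
* owns effective_xcpus = 2-3, the loop above leaves new_ecpus = 0-1,4-7,
* i.e. the partition's exclusive CPUs minus whatever its child partitions
* have carved out.
*/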
2195
2196
/*
* update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
* @cs: the cpuset to consider
* @tmp: temp variables for calculating effective_cpus & partition setup
* @force: don't skip any descendant cpusets if set
*
* When the configured cpumask is changed, the effective cpumasks of this
* cpuset and all its descendants need to be updated.
*
* On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
*
* Called with cpuset_mutex held
*/
2209
static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
2210
bool force)
2211
{
2212
struct cpuset *cp;
2213
struct cgroup_subsys_state *pos_css;
2214
int old_prs, new_prs;
2215
2216
rcu_read_lock();
2217
cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2218
struct cpuset *parent = parent_cs(cp);
2219
bool remote = is_remote_partition(cp);
2220
bool update_parent = false;
2221
2222
old_prs = new_prs = cp->partition_root_state;
2223
2224
/*
2225
* For child remote partition root (!= cs), we need to call
2226
* remote_cpus_update() if effective_xcpus will be changed.
2227
* Otherwise, we can skip the whole subtree.
2228
*
2229
* remote_cpus_update() will reuse tmp->new_cpus only after
2230
* its value is being processed.
2231
*/
2232
if (remote && (cp != cs)) {
2233
compute_excpus(cp, tmp->new_cpus);
2234
if (cpumask_equal(cp->effective_xcpus, tmp->new_cpus)) {
2235
pos_css = css_rightmost_descendant(pos_css);
2236
continue;
2237
}
2238
rcu_read_unlock();
2239
remote_cpus_update(cp, NULL, tmp->new_cpus, tmp);
2240
rcu_read_lock();
2241
2242
/* Remote partition may be invalidated */
2243
new_prs = cp->partition_root_state;
2244
remote = (new_prs == old_prs);
2245
}
2246
2247
if (remote || (is_partition_valid(parent) && is_partition_valid(cp)))
2248
compute_partition_effective_cpumask(cp, tmp->new_cpus);
2249
else
2250
compute_effective_cpumask(tmp->new_cpus, cp, parent);
2251
2252
if (remote)
2253
goto get_css; /* Ready to update cpuset data */
2254
2255
/*
2256
* A partition with no effective_cpus is allowed as long as
2257
* there is no task associated with it. Call
2258
* update_parent_effective_cpumask() to check it.
2259
*/
2260
if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) {
2261
update_parent = true;
2262
goto update_parent_effective;
2263
}
2264
2265
/*
2266
* If it becomes empty, inherit the effective mask of the
2267
* parent, which is guaranteed to have some CPUs unless
2268
* it is a partition root that has explicitly distributed
2269
* out all its CPUs.
2270
*/
2271
if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus))
2272
cpumask_copy(tmp->new_cpus, parent->effective_cpus);
2273
2274
/*
2275
* Skip the whole subtree if
2276
* 1) the cpumask remains the same,
2277
* 2) has no partition root state,
2278
* 3) force flag not set, and
2279
* 4) for v2 load balance state same as its parent.
2280
*/
2281
if (!cp->partition_root_state && !force &&
2282
cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
2283
(!cpuset_v2() ||
2284
(is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
2285
pos_css = css_rightmost_descendant(pos_css);
2286
continue;
2287
}
2288
2289
update_parent_effective:
2290
/*
2291
* update_parent_effective_cpumask() should have been called
2292
* for cs already in update_cpumask(). We should also call
2293
* cpuset_update_tasks_cpumask() again for tasks in the parent
2294
* cpuset if the parent's effective_cpus changes.
2295
*/
2296
if ((cp != cs) && old_prs) {
2297
switch (parent->partition_root_state) {
2298
case PRS_ROOT:
2299
case PRS_ISOLATED:
2300
update_parent = true;
2301
break;
2302
2303
default:
2304
/*
2305
* When parent is not a partition root or is
2306
* invalid, child partition roots become
2307
* invalid too.
2308
*/
2309
if (is_partition_valid(cp))
2310
new_prs = -cp->partition_root_state;
2311
WRITE_ONCE(cp->prs_err,
2312
is_partition_invalid(parent)
2313
? PERR_INVPARENT : PERR_NOTPART);
2314
break;
2315
}
2316
}
2317
get_css:
2318
if (!css_tryget_online(&cp->css))
2319
continue;
2320
rcu_read_unlock();
2321
2322
if (update_parent) {
2323
update_parent_effective_cpumask(cp, partcmd_update, NULL, tmp);
2324
/*
2325
* The cpuset partition_root_state may become
2326
* invalid. Capture it.
2327
*/
2328
new_prs = cp->partition_root_state;
2329
}
2330
2331
spin_lock_irq(&callback_lock);
2332
cpumask_copy(cp->effective_cpus, tmp->new_cpus);
2333
cp->partition_root_state = new_prs;
2334
if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs))
2335
compute_excpus(cp, cp->effective_xcpus);
2336
2337
/*
2338
* Make sure effective_xcpus is properly set for a valid
2339
* partition root.
2340
*/
2341
if ((new_prs > 0) && cpumask_empty(cp->exclusive_cpus))
2342
cpumask_and(cp->effective_xcpus,
2343
cp->cpus_allowed, parent->effective_xcpus);
2344
else if (new_prs < 0)
2345
reset_partition_data(cp);
2346
spin_unlock_irq(&callback_lock);
2347
2348
notify_partition_change(cp, old_prs);
2349
2350
WARN_ON(!is_in_v2_mode() &&
2351
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
2352
2353
cpuset_update_tasks_cpumask(cp, cp->effective_cpus);
2354
2355
/*
2356
* On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
2357
* from parent if current cpuset isn't a valid partition root
2358
* and their load balance states differ.
2359
*/
2360
if (cpuset_v2() && !is_partition_valid(cp) &&
2361
(is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
2362
if (is_sched_load_balance(parent))
2363
set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2364
else
2365
clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2366
}
2367
2368
/*
2369
* On legacy hierarchy, if the effective cpumask of any non-
2370
* empty cpuset is changed, we need to rebuild sched domains.
2371
* On default hierarchy, the cpuset needs to be a partition
2372
* root as well.
2373
*/
2374
if (!cpumask_empty(cp->cpus_allowed) &&
2375
is_sched_load_balance(cp) &&
2376
(!cpuset_v2() || is_partition_valid(cp)))
2377
cpuset_force_rebuild();
2378
2379
rcu_read_lock();
2380
css_put(&cp->css);
2381
}
2382
rcu_read_unlock();
2383
}
2384
2385
/**
2386
* update_sibling_cpumasks - Update siblings cpumasks
2387
* @parent: Parent cpuset
2388
* @cs: Current cpuset
2389
* @tmp: Temp variables
2390
*/
2391
static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
2392
struct tmpmasks *tmp)
2393
{
2394
struct cpuset *sibling;
2395
struct cgroup_subsys_state *pos_css;
2396
2397
lockdep_assert_held(&cpuset_mutex);
2398
2399
/*
2400
* Check all its siblings and call update_cpumasks_hier()
2401
* if their effective_cpus will need to be changed.
2402
*
2403
* It is possible a change in parent's effective_cpus
2404
* due to a change in a child partition's effective_xcpus will impact
2405
* its siblings even if they do not inherit parent's effective_cpus
2406
* directly.
2407
*
2408
* The update_cpumasks_hier() function may sleep. So we have to
2409
* release the RCU read lock before calling it.
2410
*/
2411
rcu_read_lock();
2412
cpuset_for_each_child(sibling, pos_css, parent) {
2413
if (sibling == cs)
2414
continue;
2415
if (!is_partition_valid(sibling)) {
2416
compute_effective_cpumask(tmp->new_cpus, sibling,
2417
parent);
2418
if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus))
2419
continue;
2420
} else if (is_remote_partition(sibling)) {
2421
/*
2422
* Change in a sibling cpuset won't affect a remote
2423
* partition root.
2424
*/
2425
continue;
2426
}
2427
2428
if (!css_tryget_online(&sibling->css))
2429
continue;
2430
2431
rcu_read_unlock();
2432
update_cpumasks_hier(sibling, tmp, false);
2433
rcu_read_lock();
2434
css_put(&sibling->css);
2435
}
2436
rcu_read_unlock();
2437
}
2438
2439
static int parse_cpuset_cpulist(const char *buf, struct cpumask *out_mask)
2440
{
2441
int retval;
2442
2443
retval = cpulist_parse(buf, out_mask);
2444
if (retval < 0)
2445
return retval;
2446
if (!cpumask_subset(out_mask, top_cpuset.cpus_allowed))
2447
return -EINVAL;
2448
2449
return 0;
2450
}
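/*
* Example (editor's illustration): parse_cpuset_cpulist() accepts the usual
* kernel cpulist syntax, e.g. "0-3,8" for CPUs 0, 1, 2, 3 and 8. A list
* containing CPUs outside top_cpuset.cpus_allowed is rejected with -EINVAL.
*/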
2451
2452
/**
2453
* validate_partition - Validate a cpuset partition configuration
2454
* @cs: The cpuset to validate
2455
* @trialcs: The trial cpuset containing proposed configuration changes
2456
*
2457
* If any validation check fails, the appropriate error code is set in the
2458
* cpuset's prs_err field.
2459
*
2460
* Return: PRS error code (0 if valid, non-zero error code if invalid)
2461
*/
2462
static enum prs_errcode validate_partition(struct cpuset *cs, struct cpuset *trialcs)
2463
{
2464
struct cpuset *parent = parent_cs(cs);
2465
2466
if (cs_is_member(trialcs))
2467
return PERR_NONE;
2468
2469
if (cpumask_empty(trialcs->effective_xcpus))
2470
return PERR_INVCPUS;
2471
2472
if (prstate_housekeeping_conflict(trialcs->partition_root_state,
2473
trialcs->effective_xcpus))
2474
return PERR_HKEEPING;
2475
2476
if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus))
2477
return PERR_NOCPUS;
2478
2479
return PERR_NONE;
2480
}
2481
2482
static int cpus_allowed_validate_change(struct cpuset *cs, struct cpuset *trialcs,
2483
struct tmpmasks *tmp)
2484
{
2485
int retval;
2486
struct cpuset *parent = parent_cs(cs);
2487
2488
retval = validate_change(cs, trialcs);
2489
2490
if ((retval == -EINVAL) && cpuset_v2()) {
2491
struct cgroup_subsys_state *css;
2492
struct cpuset *cp;
2493
2494
/*
2495
* The -EINVAL error code indicates that partition sibling
2496
* CPU exclusivity rule has been violated. We still allow
2497
* the cpumask change to proceed while invalidating the
2498
* partition. However, any conflicting sibling partitions
2499
* have to be marked as invalid too.
2500
*/
2501
trialcs->prs_err = PERR_NOTEXCL;
2502
rcu_read_lock();
2503
cpuset_for_each_child(cp, css, parent) {
2504
struct cpumask *xcpus = user_xcpus(trialcs);
2505
2506
if (is_partition_valid(cp) &&
2507
cpumask_intersects(xcpus, cp->effective_xcpus)) {
2508
rcu_read_unlock();
2509
update_parent_effective_cpumask(cp, partcmd_invalidate, NULL, tmp);
2510
rcu_read_lock();
2511
}
2512
}
2513
rcu_read_unlock();
2514
retval = 0;
2515
}
2516
return retval;
2517
}
2518
2519
/**
2520
* partition_cpus_change - Handle partition state changes due to CPU mask updates
2521
* @cs: The target cpuset being modified
2522
* @trialcs: The trial cpuset containing proposed configuration changes
2523
* @tmp: Temporary masks for intermediate calculations
2524
*
2525
* This function handles partition state transitions triggered by CPU mask changes.
2526
* CPU modifications may cause a partition to be disabled or require state updates.
2527
*/
2528
static void partition_cpus_change(struct cpuset *cs, struct cpuset *trialcs,
2529
struct tmpmasks *tmp)
2530
{
2531
enum prs_errcode prs_err;
2532
2533
if (cs_is_member(cs))
2534
return;
2535
2536
prs_err = validate_partition(cs, trialcs);
2537
if (prs_err)
2538
trialcs->prs_err = cs->prs_err = prs_err;
2539
2540
if (is_remote_partition(cs)) {
2541
if (trialcs->prs_err)
2542
remote_partition_disable(cs, tmp);
2543
else
2544
remote_cpus_update(cs, trialcs->exclusive_cpus,
2545
trialcs->effective_xcpus, tmp);
2546
} else {
2547
if (trialcs->prs_err)
2548
update_parent_effective_cpumask(cs, partcmd_invalidate,
2549
NULL, tmp);
2550
else
2551
update_parent_effective_cpumask(cs, partcmd_update,
2552
trialcs->effective_xcpus, tmp);
2553
}
2554
}
2555
2556
/**
2557
* update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
2558
* @cs: the cpuset to consider
2559
* @trialcs: trial cpuset
2560
* @buf: buffer of cpu numbers written to this cpuset
2561
*/
2562
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2563
const char *buf)
2564
{
2565
int retval;
2566
struct tmpmasks tmp;
2567
bool force = false;
2568
int old_prs = cs->partition_root_state;
2569
2570
retval = parse_cpuset_cpulist(buf, trialcs->cpus_allowed);
2571
if (retval < 0)
2572
return retval;
2573
2574
/* Nothing to do if the cpus didn't change */
2575
if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
2576
return 0;
2577
2578
if (alloc_tmpmasks(&tmp))
2579
return -ENOMEM;
2580
2581
compute_trialcs_excpus(trialcs, cs);
2582
trialcs->prs_err = PERR_NONE;
2583
2584
retval = cpus_allowed_validate_change(cs, trialcs, &tmp);
2585
if (retval < 0)
2586
goto out_free;
2587
2588
/*
2589
* Check all the descendants in update_cpumasks_hier() if
2590
* effective_xcpus is to be changed.
2591
*/
2592
force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2593
2594
partition_cpus_change(cs, trialcs, &tmp);
2595
2596
spin_lock_irq(&callback_lock);
2597
cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
2598
cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2599
if ((old_prs > 0) && !is_partition_valid(cs))
2600
reset_partition_data(cs);
2601
spin_unlock_irq(&callback_lock);
2602
2603
/* effective_cpus/effective_xcpus will be updated here */
2604
update_cpumasks_hier(cs, &tmp, force);
2605
2606
/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2607
if (cs->partition_root_state)
2608
update_partition_sd_lb(cs, old_prs);
2609
out_free:
2610
free_tmpmasks(&tmp);
2611
return retval;
2612
}
2613
2614
/**
2615
* update_exclusive_cpumask - update the exclusive_cpus mask of a cpuset
2616
* @cs: the cpuset to consider
2617
* @trialcs: trial cpuset
2618
* @buf: buffer of cpu numbers written to this cpuset
2619
*
2620
* The tasks' cpumask will be updated if cs is a valid partition root.
2621
*/
2622
static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2623
const char *buf)
2624
{
2625
int retval;
2626
struct tmpmasks tmp;
2627
bool force = false;
2628
int old_prs = cs->partition_root_state;
2629
2630
retval = parse_cpuset_cpulist(buf, trialcs->exclusive_cpus);
2631
if (retval < 0)
2632
return retval;
2633
2634
/* Nothing to do if the CPUs didn't change */
2635
if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus))
2636
return 0;
2637
2638
/*
* Reject the change if the exclusive CPUs conflict with those of
* the siblings.
*/
2642
if (compute_trialcs_excpus(trialcs, cs))
2643
return -EINVAL;
2644
2645
/*
2646
* Check all the descendants in update_cpumasks_hier() if
2647
* effective_xcpus is to be changed.
2648
*/
2649
force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2650
2651
retval = validate_change(cs, trialcs);
2652
if (retval)
2653
return retval;
2654
2655
if (alloc_tmpmasks(&tmp))
2656
return -ENOMEM;
2657
2658
trialcs->prs_err = PERR_NONE;
2659
partition_cpus_change(cs, trialcs, &tmp);
2660
2661
spin_lock_irq(&callback_lock);
2662
cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus);
2663
cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2664
if ((old_prs > 0) && !is_partition_valid(cs))
2665
reset_partition_data(cs);
2666
spin_unlock_irq(&callback_lock);
2667
2668
/*
2669
* Call update_cpumasks_hier() to update effective_cpus/effective_xcpus
2670
* of the subtree when it is a valid partition root or effective_xcpus
2671
* is updated.
2672
*/
2673
if (is_partition_valid(cs) || force)
2674
update_cpumasks_hier(cs, &tmp, force);
2675
2676
/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2677
if (cs->partition_root_state)
2678
update_partition_sd_lb(cs, old_prs);
2679
2680
free_tmpmasks(&tmp);
2681
return 0;
2682
}
2683
2684
/*
2685
* Migrate memory region from one set of nodes to another. This is
2686
* performed asynchronously as it can be called from process migration path
2687
* holding locks involved in process management. All mm migrations are
2688
* performed in the queued order and can be waited for by flushing
2689
* cpuset_migrate_mm_wq.
2690
*/
2691
2692
struct cpuset_migrate_mm_work {
2693
struct work_struct work;
2694
struct mm_struct *mm;
2695
nodemask_t from;
2696
nodemask_t to;
2697
};
2698
2699
static void cpuset_migrate_mm_workfn(struct work_struct *work)
2700
{
2701
struct cpuset_migrate_mm_work *mwork =
2702
container_of(work, struct cpuset_migrate_mm_work, work);
2703
2704
/* on a wq worker, no need to worry about %current's mems_allowed */
2705
do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
2706
mmput(mwork->mm);
2707
kfree(mwork);
2708
}
2709
2710
static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
2711
const nodemask_t *to)
2712
{
2713
struct cpuset_migrate_mm_work *mwork;
2714
2715
if (nodes_equal(*from, *to)) {
2716
mmput(mm);
2717
return;
2718
}
2719
2720
mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
2721
if (mwork) {
2722
mwork->mm = mm;
2723
mwork->from = *from;
2724
mwork->to = *to;
2725
INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
2726
queue_work(cpuset_migrate_mm_wq, &mwork->work);
2727
} else {
2728
mmput(mm);
2729
}
2730
}
2731
2732
static void flush_migrate_mm_task_workfn(struct callback_head *head)
2733
{
2734
flush_workqueue(cpuset_migrate_mm_wq);
2735
kfree(head);
2736
}
2737
2738
static void schedule_flush_migrate_mm(void)
2739
{
2740
struct callback_head *flush_cb;
2741
2742
flush_cb = kzalloc(sizeof(struct callback_head), GFP_KERNEL);
2743
if (!flush_cb)
2744
return;
2745
2746
init_task_work(flush_cb, flush_migrate_mm_task_workfn);
2747
2748
if (task_work_add(current, flush_cb, TWA_RESUME))
2749
kfree(flush_cb);
2750
}
2751
2752
/*
* cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
* @tsk: the task to change
* @newmems: the new nodes to set for the task
*
* We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
* and rebind the task's mempolicy, if any. If the task is allocating in
* parallel, it might temporarily see an empty intersection, which results in
* a seqlock check and retry before OOM or allocation failure.
*/
2762
static void cpuset_change_task_nodemask(struct task_struct *tsk,
2763
nodemask_t *newmems)
2764
{
2765
task_lock(tsk);
2766
2767
local_irq_disable();
2768
write_seqcount_begin(&tsk->mems_allowed_seq);
2769
2770
nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
2771
mpol_rebind_task(tsk, newmems);
2772
tsk->mems_allowed = *newmems;
2773
2774
write_seqcount_end(&tsk->mems_allowed_seq);
2775
local_irq_enable();
2776
2777
task_unlock(tsk);
2778
}
2779
2780
static void *cpuset_being_rebound;
2781
2782
/**
2783
* cpuset_update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2784
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2785
*
2786
* Iterate through each task of @cs updating its mems_allowed to the
2787
* effective cpuset's. As this function is called with cpuset_mutex held,
2788
* cpuset membership stays stable.
2789
*/
2790
void cpuset_update_tasks_nodemask(struct cpuset *cs)
2791
{
2792
static nodemask_t newmems; /* protected by cpuset_mutex */
2793
struct css_task_iter it;
2794
struct task_struct *task;
2795
2796
cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
2797
2798
guarantee_online_mems(cs, &newmems);
2799
2800
/*
2801
* The mpol_rebind_mm() call takes mmap_lock, which we couldn't
2802
* take while holding tasklist_lock. Forks can happen - the
2803
* mpol_dup() cpuset_being_rebound check will catch such forks,
2804
* and rebind their vma mempolicies too. Because we still hold
2805
* the global cpuset_mutex, we know that no other rebind effort
2806
* will be contending for the global variable cpuset_being_rebound.
2807
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
2808
* is idempotent. Also migrate pages in each mm to new nodes.
2809
*/
2810
css_task_iter_start(&cs->css, 0, &it);
2811
while ((task = css_task_iter_next(&it))) {
2812
struct mm_struct *mm;
2813
bool migrate;
2814
2815
cpuset_change_task_nodemask(task, &newmems);
2816
2817
mm = get_task_mm(task);
2818
if (!mm)
2819
continue;
2820
2821
migrate = is_memory_migrate(cs);
2822
2823
mpol_rebind_mm(mm, &cs->mems_allowed);
2824
if (migrate)
2825
cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2826
else
2827
mmput(mm);
2828
}
2829
css_task_iter_end(&it);
2830
2831
/*
2832
* All the tasks' nodemasks have been updated, update
2833
* cs->old_mems_allowed.
2834
*/
2835
cs->old_mems_allowed = newmems;
2836
2837
/* We're done rebinding vmas to this cpuset's new mems_allowed. */
2838
cpuset_being_rebound = NULL;
2839
}
2840
2841
/*
* update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
* @cs: the cpuset to consider
* @new_mems: a temp variable for calculating new effective_mems
*
* When the configured nodemask is changed, the effective nodemasks of this
* cpuset and all its descendants need to be updated.
*
* On legacy hierarchy, effective_mems will be the same as mems_allowed.
*
* Called with cpuset_mutex held
*/
2853
static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2854
{
2855
struct cpuset *cp;
2856
struct cgroup_subsys_state *pos_css;
2857
2858
rcu_read_lock();
2859
cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2860
struct cpuset *parent = parent_cs(cp);
2861
2862
nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2863
2864
/*
2865
* If it becomes empty, inherit the effective mask of the
2866
* parent, which is guaranteed to have some MEMs.
2867
*/
2868
if (is_in_v2_mode() && nodes_empty(*new_mems))
2869
*new_mems = parent->effective_mems;
2870
2871
/* Skip the whole subtree if the nodemask remains the same. */
2872
if (nodes_equal(*new_mems, cp->effective_mems)) {
2873
pos_css = css_rightmost_descendant(pos_css);
2874
continue;
2875
}
2876
2877
if (!css_tryget_online(&cp->css))
2878
continue;
2879
rcu_read_unlock();
2880
2881
spin_lock_irq(&callback_lock);
2882
cp->effective_mems = *new_mems;
2883
spin_unlock_irq(&callback_lock);
2884
2885
WARN_ON(!is_in_v2_mode() &&
2886
!nodes_equal(cp->mems_allowed, cp->effective_mems));
2887
2888
cpuset_update_tasks_nodemask(cp);
2889
2890
rcu_read_lock();
2891
css_put(&cp->css);
2892
}
2893
rcu_read_unlock();
2894
}
2895
2896
/*
* Handle user request to change the 'mems' memory placement
* of a cpuset. Needs to validate the request, update the
* cpuset's mems_allowed, and for each task in the cpuset,
* update mems_allowed and rebind the task's mempolicy and any vma
* mempolicies and, if the cpuset is marked 'memory_migrate',
* migrate the task's pages to the new memory.
*
* Call with cpuset_mutex held. May take callback_lock during call.
* Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
* lock each such task's mm->mmap_lock, scan its vma's and rebind
* their mempolicies to the cpuset's new mems_allowed.
*/
2909
static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2910
const char *buf)
2911
{
2912
int retval;
2913
2914
/*
2915
* An empty mems_allowed is ok iff there are no tasks in the cpuset.
2916
* The validate_change() call ensures that cpusets with tasks have memory.
2917
*/
2918
retval = nodelist_parse(buf, trialcs->mems_allowed);
2919
if (retval < 0)
2920
return retval;
2921
2922
if (!nodes_subset(trialcs->mems_allowed,
2923
top_cpuset.mems_allowed))
2924
return -EINVAL;
2925
2926
/* No change? nothing to do */
2927
if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed))
2928
return 0;
2929
2930
retval = validate_change(cs, trialcs);
2931
if (retval < 0)
2932
return retval;
2933
2934
check_insane_mems_config(&trialcs->mems_allowed);
2935
2936
spin_lock_irq(&callback_lock);
2937
cs->mems_allowed = trialcs->mems_allowed;
2938
spin_unlock_irq(&callback_lock);
2939
2940
/* use trialcs->mems_allowed as a temp variable */
2941
update_nodemasks_hier(cs, &trialcs->mems_allowed);
2942
return 0;
2943
}
2944
2945
bool current_cpuset_is_being_rebound(void)
2946
{
2947
bool ret;
2948
2949
rcu_read_lock();
2950
ret = task_cs(current) == cpuset_being_rebound;
2951
rcu_read_unlock();
2952
2953
return ret;
2954
}
2955
2956
/*
2957
* cpuset_update_flag - read a 0 or a 1 in a file and update associated flag
2958
* bit: the bit to update (see cpuset_flagbits_t)
2959
* cs: the cpuset to update
2960
* turning_on: whether the flag is being set or cleared
2961
*
2962
* Call with cpuset_mutex held.
2963
*/
2964
2965
int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2966
int turning_on)
2967
{
2968
struct cpuset *trialcs;
2969
int balance_flag_changed;
2970
int spread_flag_changed;
2971
int err;
2972
2973
trialcs = dup_or_alloc_cpuset(cs);
2974
if (!trialcs)
2975
return -ENOMEM;
2976
2977
if (turning_on)
2978
set_bit(bit, &trialcs->flags);
2979
else
2980
clear_bit(bit, &trialcs->flags);
2981
2982
err = validate_change(cs, trialcs);
2983
if (err < 0)
2984
goto out;
2985
2986
balance_flag_changed = (is_sched_load_balance(cs) !=
2987
is_sched_load_balance(trialcs));
2988
2989
spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
2990
|| (is_spread_page(cs) != is_spread_page(trialcs)));
2991
2992
spin_lock_irq(&callback_lock);
2993
cs->flags = trialcs->flags;
2994
spin_unlock_irq(&callback_lock);
2995
2996
if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) {
2997
if (cpuset_v2())
2998
cpuset_force_rebuild();
2999
else
3000
rebuild_sched_domains_locked();
3001
}
3002
3003
if (spread_flag_changed)
3004
cpuset1_update_tasks_flags(cs);
3005
out:
3006
free_cpuset(trialcs);
3007
return err;
3008
}
3009
3010
/**
3011
* update_prstate - update partition_root_state
3012
* @cs: the cpuset to update
3013
* @new_prs: new partition root state
3014
* Return: 0 if successful, != 0 if error
3015
*
3016
* Call with cpuset_mutex held.
3017
*/
3018
static int update_prstate(struct cpuset *cs, int new_prs)
3019
{
3020
int err = PERR_NONE, old_prs = cs->partition_root_state;
3021
struct cpuset *parent = parent_cs(cs);
3022
struct tmpmasks tmpmask;
3023
bool isolcpus_updated = false;
3024
3025
if (old_prs == new_prs)
3026
return 0;
3027
3028
/*
3029
* Treat a previously invalid partition root as if it is a "member".
3030
*/
3031
if (new_prs && is_partition_invalid(cs))
3032
old_prs = PRS_MEMBER;
3033
3034
if (alloc_tmpmasks(&tmpmask))
3035
return -ENOMEM;
3036
3037
err = update_partition_exclusive_flag(cs, new_prs);
3038
if (err)
3039
goto out;
3040
3041
if (!old_prs) {
3042
/*
3043
* cpus_allowed and exclusive_cpus cannot be both empty.
3044
*/
3045
if (xcpus_empty(cs)) {
3046
err = PERR_CPUSEMPTY;
3047
goto out;
3048
}
3049
3050
/*
3051
* We don't support the creation of a new local partition with
3052
* a remote partition underneath it. This unsupported
3053
* setting can happen only if parent is the top_cpuset because
3054
* a remote partition cannot be created underneath an existing
3055
* local or remote partition.
3056
*/
3057
if ((parent == &top_cpuset) &&
3058
cpumask_intersects(cs->exclusive_cpus, subpartitions_cpus)) {
3059
err = PERR_REMOTE;
3060
goto out;
3061
}
3062
3063
/*
* If the parent is a valid partition root, enable a local partition.
* Otherwise, enable a remote partition.
*/
3067
if (is_partition_valid(parent)) {
3068
enum partition_cmd cmd = (new_prs == PRS_ROOT)
3069
? partcmd_enable : partcmd_enablei;
3070
3071
err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask);
3072
} else {
3073
err = remote_partition_enable(cs, new_prs, &tmpmask);
3074
}
3075
} else if (old_prs && new_prs) {
3076
/*
3077
* A change in load balance state only, no change in cpumasks.
3078
* Need to update isolated_cpus.
3079
*/
3080
if (((new_prs == PRS_ISOLATED) &&
3081
!isolated_cpus_can_update(cs->effective_xcpus, NULL)) ||
3082
prstate_housekeeping_conflict(new_prs, cs->effective_xcpus))
3083
err = PERR_HKEEPING;
3084
else
3085
isolcpus_updated = true;
3086
} else {
3087
/*
3088
* Switching back to member is always allowed even if it
3089
* disables child partitions.
3090
*/
3091
if (is_remote_partition(cs))
3092
remote_partition_disable(cs, &tmpmask);
3093
else
3094
update_parent_effective_cpumask(cs, partcmd_disable,
3095
NULL, &tmpmask);
3096
3097
/*
3098
* Invalidation of child partitions will be done in
3099
* update_cpumasks_hier().
3100
*/
3101
}
3102
out:
3103
/*
3104
* Make partition invalid & disable CS_CPU_EXCLUSIVE if an error
3105
* happens.
3106
*/
3107
if (err) {
3108
new_prs = -new_prs;
3109
update_partition_exclusive_flag(cs, new_prs);
3110
}
3111
3112
spin_lock_irq(&callback_lock);
3113
cs->partition_root_state = new_prs;
3114
WRITE_ONCE(cs->prs_err, err);
3115
if (!is_partition_valid(cs))
3116
reset_partition_data(cs);
3117
else if (isolcpus_updated)
3118
isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
3119
spin_unlock_irq(&callback_lock);
3120
update_isolation_cpumasks();
3121
3122
/* Force update if switching back to member & update effective_xcpus */
3123
update_cpumasks_hier(cs, &tmpmask, !new_prs);
3124
3125
/* A newly created partition must have effective_xcpus set */
3126
WARN_ON_ONCE(!old_prs && (new_prs > 0)
3127
&& cpumask_empty(cs->effective_xcpus));
3128
3129
/* Update sched domains and load balance flag */
3130
update_partition_sd_lb(cs, old_prs);
3131
3132
notify_partition_change(cs, old_prs);
3133
if (force_sd_rebuild)
3134
rebuild_sched_domains_locked();
3135
free_tmpmasks(&tmpmask);
3136
return 0;
3137
}
3138
3139
static struct cpuset *cpuset_attach_old_cs;
3140
3141
/*
3142
* Check to see if a cpuset can accept a new task
3143
* For v1, cpus_allowed and mems_allowed can't be empty.
3144
* For v2, effective_cpus can't be empty.
3145
* Note that in v1, effective_cpus = cpus_allowed.
3146
*/
3147
static int cpuset_can_attach_check(struct cpuset *cs)
3148
{
3149
if (cpumask_empty(cs->effective_cpus) ||
3150
(!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
3151
return -ENOSPC;
3152
return 0;
3153
}
3154
3155
static void reset_migrate_dl_data(struct cpuset *cs)
3156
{
3157
cs->nr_migrate_dl_tasks = 0;
3158
cs->sum_migrate_dl_bw = 0;
3159
}
3160
3161
/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
3162
static int cpuset_can_attach(struct cgroup_taskset *tset)
3163
{
3164
struct cgroup_subsys_state *css;
3165
struct cpuset *cs, *oldcs;
3166
struct task_struct *task;
3167
bool cpus_updated, mems_updated;
3168
int ret;
3169
3170
/* used later by cpuset_attach() */
3171
cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
3172
oldcs = cpuset_attach_old_cs;
3173
cs = css_cs(css);
3174
3175
mutex_lock(&cpuset_mutex);
3176
3177
/* Check to see if task is allowed in the cpuset */
3178
ret = cpuset_can_attach_check(cs);
3179
if (ret)
3180
goto out_unlock;
3181
3182
cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus);
3183
mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3184
3185
cgroup_taskset_for_each(task, css, tset) {
3186
ret = task_can_attach(task);
3187
if (ret)
3188
goto out_unlock;
3189
3190
/*
* Skip the rights-over-task check in v2 when nothing changes;
* migration permission derives from hierarchy ownership in
* cgroup_procs_write_permission().
*/
3195
if (!cpuset_v2() || (cpus_updated || mems_updated)) {
3196
ret = security_task_setscheduler(task);
3197
if (ret)
3198
goto out_unlock;
3199
}
3200
3201
if (dl_task(task)) {
3202
cs->nr_migrate_dl_tasks++;
3203
cs->sum_migrate_dl_bw += task->dl.dl_bw;
3204
}
3205
}
3206
3207
if (!cs->nr_migrate_dl_tasks)
3208
goto out_success;
3209
3210
if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
3211
int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
3212
3213
if (unlikely(cpu >= nr_cpu_ids)) {
3214
reset_migrate_dl_data(cs);
3215
ret = -EINVAL;
3216
goto out_unlock;
3217
}
3218
3219
ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
3220
if (ret) {
3221
reset_migrate_dl_data(cs);
3222
goto out_unlock;
3223
}
3224
}
3225
3226
out_success:
3227
/*
3228
* Mark attach is in progress. This makes validate_change() fail
3229
* changes which zero cpus/mems_allowed.
3230
*/
3231
cs->attach_in_progress++;
3232
out_unlock:
3233
mutex_unlock(&cpuset_mutex);
3234
return ret;
3235
}
3236
3237
static void cpuset_cancel_attach(struct cgroup_taskset *tset)
3238
{
3239
struct cgroup_subsys_state *css;
3240
struct cpuset *cs;
3241
3242
cgroup_taskset_first(tset, &css);
3243
cs = css_cs(css);
3244
3245
mutex_lock(&cpuset_mutex);
3246
dec_attach_in_progress_locked(cs);
3247
3248
if (cs->nr_migrate_dl_tasks) {
3249
int cpu = cpumask_any(cs->effective_cpus);
3250
3251
dl_bw_free(cpu, cs->sum_migrate_dl_bw);
3252
reset_migrate_dl_data(cs);
3253
}
3254
3255
mutex_unlock(&cpuset_mutex);
3256
}
3257
3258
/*
3259
* Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
3260
* but we can't allocate it dynamically there. Define it global and
3261
* allocate from cpuset_init().
3262
*/
3263
static cpumask_var_t cpus_attach;
3264
static nodemask_t cpuset_attach_nodemask_to;
3265
3266
static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
3267
{
3268
lockdep_assert_held(&cpuset_mutex);
3269
3270
if (cs != &top_cpuset)
3271
guarantee_active_cpus(task, cpus_attach);
3272
else
3273
cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
3274
subpartitions_cpus);
3275
/*
3276
* can_attach beforehand should guarantee that this doesn't
3277
* fail. TODO: have a better way to handle failure here
3278
*/
3279
WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
3280
3281
cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
3282
cpuset1_update_task_spread_flags(cs, task);
3283
}
3284
3285
static void cpuset_attach(struct cgroup_taskset *tset)
3286
{
3287
struct task_struct *task;
3288
struct task_struct *leader;
3289
struct cgroup_subsys_state *css;
3290
struct cpuset *cs;
3291
struct cpuset *oldcs = cpuset_attach_old_cs;
3292
bool cpus_updated, mems_updated;
3293
bool queue_task_work = false;
3294
3295
cgroup_taskset_first(tset, &css);
3296
cs = css_cs(css);
3297
3298
lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
3299
mutex_lock(&cpuset_mutex);
3300
cpus_updated = !cpumask_equal(cs->effective_cpus,
3301
oldcs->effective_cpus);
3302
mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3303
3304
/*
3305
* In the default hierarchy, enabling cpuset in the child cgroups
3306
* will trigger a number of cpuset_attach() calls with no change
3307
* in effective cpus and mems. In that case, we can optimize out
3308
* by skipping the task iteration and update.
3309
*/
3310
if (cpuset_v2() && !cpus_updated && !mems_updated) {
3311
cpuset_attach_nodemask_to = cs->effective_mems;
3312
goto out;
3313
}
3314
3315
guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3316
3317
cgroup_taskset_for_each(task, css, tset)
3318
cpuset_attach_task(cs, task);
3319
3320
/*
3321
* Change mm for all threadgroup leaders. This is expensive and may
3322
* sleep and should be moved outside migration path proper. Skip it
3323
* if there is no change in effective_mems and CS_MEMORY_MIGRATE is
3324
* not set.
3325
*/
3326
cpuset_attach_nodemask_to = cs->effective_mems;
3327
if (!is_memory_migrate(cs) && !mems_updated)
3328
goto out;
3329
3330
cgroup_taskset_for_each_leader(leader, css, tset) {
3331
struct mm_struct *mm = get_task_mm(leader);
3332
3333
if (mm) {
3334
mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
3335
3336
/*
* old_mems_allowed is the same as mems_allowed
* here, except if this task is being moved
* automatically due to hotplug. In that case
* @mems_allowed has been updated and is empty, so
* @old_mems_allowed is the right nodeset to
* migrate the mm from.
*/
3344
if (is_memory_migrate(cs)) {
3345
cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
3346
&cpuset_attach_nodemask_to);
3347
queue_task_work = true;
3348
} else
3349
mmput(mm);
3350
}
3351
}
3352
3353
out:
3354
if (queue_task_work)
3355
schedule_flush_migrate_mm();
3356
cs->old_mems_allowed = cpuset_attach_nodemask_to;
3357
3358
if (cs->nr_migrate_dl_tasks) {
3359
cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
3360
oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
3361
reset_migrate_dl_data(cs);
3362
}
3363
3364
dec_attach_in_progress_locked(cs);
3365
3366
mutex_unlock(&cpuset_mutex);
3367
}
3368
3369
/*
3370
* Common handling for a write to a "cpus" or "mems" file.
3371
*/
3372
ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
3373
char *buf, size_t nbytes, loff_t off)
3374
{
3375
struct cpuset *cs = css_cs(of_css(of));
3376
struct cpuset *trialcs;
3377
int retval = -ENODEV;
3378
3379
/* root is read-only */
3380
if (cs == &top_cpuset)
3381
return -EACCES;
3382
3383
buf = strstrip(buf);
3384
cpuset_full_lock();
3385
if (!is_cpuset_online(cs))
3386
goto out_unlock;
3387
3388
trialcs = dup_or_alloc_cpuset(cs);
3389
if (!trialcs) {
3390
retval = -ENOMEM;
3391
goto out_unlock;
3392
}
3393
3394
switch (of_cft(of)->private) {
3395
case FILE_CPULIST:
3396
retval = update_cpumask(cs, trialcs, buf);
3397
break;
3398
case FILE_EXCLUSIVE_CPULIST:
3399
retval = update_exclusive_cpumask(cs, trialcs, buf);
3400
break;
3401
case FILE_MEMLIST:
3402
retval = update_nodemask(cs, trialcs, buf);
3403
break;
3404
default:
3405
retval = -EINVAL;
3406
break;
3407
}
3408
3409
free_cpuset(trialcs);
3410
if (force_sd_rebuild)
3411
rebuild_sched_domains_locked();
3412
out_unlock:
3413
cpuset_full_unlock();
3414
if (of_cft(of)->private == FILE_MEMLIST)
3415
schedule_flush_migrate_mm();
3416
return retval ?: nbytes;
3417
}
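/*
* Usage sketch (editor's illustration, hypothetical cgroup path): writes
* from user space end up here, e.g.
*
* echo "0-3" > /sys/fs/cgroup/mygroup/cpuset.cpus
* echo "0" > /sys/fs/cgroup/mygroup/cpuset.mems
*
* and are dispatched by the of_cft(of)->private switch above to
* update_cpumask() and update_nodemask() respectively.
*/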
3418
3419
/*
3420
* These ascii lists should be read in a single call, by using a user
3421
* buffer large enough to hold the entire map. If read in smaller
3422
* chunks, there is no guarantee of atomicity. Since the display format
3423
* used, list of ranges of sequential numbers, is variable length,
3424
* and since these maps can change value dynamically, one could read
3425
* gibberish by doing partial reads while a list was changing.
3426
*/
3427
int cpuset_common_seq_show(struct seq_file *sf, void *v)
3428
{
3429
struct cpuset *cs = css_cs(seq_css(sf));
3430
cpuset_filetype_t type = seq_cft(sf)->private;
3431
int ret = 0;
3432
3433
spin_lock_irq(&callback_lock);
3434
3435
switch (type) {
3436
case FILE_CPULIST:
3437
seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
3438
break;
3439
case FILE_MEMLIST:
3440
seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
3441
break;
3442
case FILE_EFFECTIVE_CPULIST:
3443
seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
3444
break;
3445
case FILE_EFFECTIVE_MEMLIST:
3446
seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
3447
break;
3448
case FILE_EXCLUSIVE_CPULIST:
3449
seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus));
3450
break;
3451
case FILE_EFFECTIVE_XCPULIST:
3452
seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus));
3453
break;
3454
case FILE_SUBPARTS_CPULIST:
3455
seq_printf(sf, "%*pbl\n", cpumask_pr_args(subpartitions_cpus));
3456
break;
3457
case FILE_ISOLATED_CPULIST:
3458
seq_printf(sf, "%*pbl\n", cpumask_pr_args(isolated_cpus));
3459
break;
3460
default:
3461
ret = -EINVAL;
3462
}
3463
3464
spin_unlock_irq(&callback_lock);
3465
return ret;
3466
}
3467
3468
static int cpuset_partition_show(struct seq_file *seq, void *v)
3469
{
3470
struct cpuset *cs = css_cs(seq_css(seq));
3471
const char *err, *type = NULL;
3472
3473
switch (cs->partition_root_state) {
3474
case PRS_ROOT:
3475
seq_puts(seq, "root\n");
3476
break;
3477
case PRS_ISOLATED:
3478
seq_puts(seq, "isolated\n");
3479
break;
3480
case PRS_MEMBER:
3481
seq_puts(seq, "member\n");
3482
break;
3483
case PRS_INVALID_ROOT:
3484
type = "root";
3485
fallthrough;
3486
case PRS_INVALID_ISOLATED:
3487
if (!type)
3488
type = "isolated";
3489
err = perr_strings[READ_ONCE(cs->prs_err)];
3490
if (err)
3491
seq_printf(seq, "%s invalid (%s)\n", type, err);
3492
else
3493
seq_printf(seq, "%s invalid\n", type);
3494
break;
3495
}
3496
return 0;
3497
}
3498
3499
static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf,
3500
size_t nbytes, loff_t off)
3501
{
3502
struct cpuset *cs = css_cs(of_css(of));
3503
int val;
3504
int retval = -ENODEV;
3505
3506
buf = strstrip(buf);
3507
3508
if (!strcmp(buf, "root"))
3509
val = PRS_ROOT;
3510
else if (!strcmp(buf, "member"))
3511
val = PRS_MEMBER;
3512
else if (!strcmp(buf, "isolated"))
3513
val = PRS_ISOLATED;
3514
else
3515
return -EINVAL;
3516
3517
cpuset_full_lock();
3518
if (is_cpuset_online(cs))
3519
retval = update_prstate(cs, val);
3520
cpuset_full_unlock();
3521
return retval ?: nbytes;
3522
}
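/*
* Usage sketch (editor's illustration, hypothetical cgroup path): the
* partition file accepts exactly the three strings handled above, e.g.
*
* echo isolated > /sys/fs/cgroup/mygroup/cpuset.cpus.partition
* cat /sys/fs/cgroup/mygroup/cpuset.cpus.partition
*
* which then reads back "isolated", or "isolated invalid (<reason>)" if
* the request could not be honored. Any other string is rejected with
* -EINVAL before cpuset_full_lock() is taken.
*/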
3523
3524
/*
3525
* This is currently a minimal set for the default hierarchy. It can be
3526
* expanded later on by migrating more features and control files from v1.
3527
*/
3528
static struct cftype dfl_files[] = {
3529
{
3530
.name = "cpus",
3531
.seq_show = cpuset_common_seq_show,
3532
.write = cpuset_write_resmask,
3533
.max_write_len = (100U + 6 * NR_CPUS),
3534
.private = FILE_CPULIST,
3535
.flags = CFTYPE_NOT_ON_ROOT,
3536
},
3537
3538
{
3539
.name = "mems",
3540
.seq_show = cpuset_common_seq_show,
3541
.write = cpuset_write_resmask,
3542
.max_write_len = (100U + 6 * MAX_NUMNODES),
3543
.private = FILE_MEMLIST,
3544
.flags = CFTYPE_NOT_ON_ROOT,
3545
},
3546
3547
{
3548
.name = "cpus.effective",
3549
.seq_show = cpuset_common_seq_show,
3550
.private = FILE_EFFECTIVE_CPULIST,
3551
},
3552
3553
{
3554
.name = "mems.effective",
3555
.seq_show = cpuset_common_seq_show,
3556
.private = FILE_EFFECTIVE_MEMLIST,
3557
},
3558
3559
{
3560
.name = "cpus.partition",
3561
.seq_show = cpuset_partition_show,
3562
.write = cpuset_partition_write,
3563
.private = FILE_PARTITION_ROOT,
3564
.flags = CFTYPE_NOT_ON_ROOT,
3565
.file_offset = offsetof(struct cpuset, partition_file),
3566
},
3567
3568
{
3569
.name = "cpus.exclusive",
3570
.seq_show = cpuset_common_seq_show,
3571
.write = cpuset_write_resmask,
3572
.max_write_len = (100U + 6 * NR_CPUS),
3573
.private = FILE_EXCLUSIVE_CPULIST,
3574
.flags = CFTYPE_NOT_ON_ROOT,
3575
},
3576
3577
{
3578
.name = "cpus.exclusive.effective",
3579
.seq_show = cpuset_common_seq_show,
3580
.private = FILE_EFFECTIVE_XCPULIST,
3581
.flags = CFTYPE_NOT_ON_ROOT,
3582
},
3583
3584
{
3585
.name = "cpus.subpartitions",
3586
.seq_show = cpuset_common_seq_show,
3587
.private = FILE_SUBPARTS_CPULIST,
3588
.flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_DEBUG,
3589
},
3590
3591
{
3592
.name = "cpus.isolated",
3593
.seq_show = cpuset_common_seq_show,
3594
.private = FILE_ISOLATED_CPULIST,
3595
.flags = CFTYPE_ONLY_ON_ROOT,
3596
},
3597
3598
{ } /* terminate */
3599
};
3600

/**
 * cpuset_css_alloc - Allocate a cpuset css
 * @parent_css: Parent css of the control group that the new cpuset will be
 * part of
 * Return: cpuset css on success, -ENOMEM on failure.
 *
 * Allocate and initialize a new cpuset css for a non-NULL @parent_css;
 * return the top cpuset css otherwise.
 */
static struct cgroup_subsys_state *
cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cpuset *cs;

	if (!parent_css)
		return &top_cpuset.css;

	cs = dup_or_alloc_cpuset(NULL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	__set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
	fmeter_init(&cs->fmeter);
	cs->relax_domain_level = -1;

	/* Set CS_MEMORY_MIGRATE for default hierarchy */
	if (cpuset_v2())
		__set_bit(CS_MEMORY_MIGRATE, &cs->flags);

	return &cs->css;
}

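/*
 * Called when a new cpuset css is brought online. The child inherits the
 * spread flags and, in is_in_v2_mode(), the effective cpus/mems of its
 * parent. If the legacy CGRP_CPUSET_CLONE_CHILDREN flag is set, the
 * parent's configuration is cloned unless a sibling holds exclusive cpus
 * or mems.
 */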
static int cpuset_css_online(struct cgroup_subsys_state *css)
{
	struct cpuset *cs = css_cs(css);
	struct cpuset *parent = parent_cs(cs);
	struct cpuset *tmp_cs;
	struct cgroup_subsys_state *pos_css;

	if (!parent)
		return 0;

	cpuset_full_lock();
	if (is_spread_page(parent))
		set_bit(CS_SPREAD_PAGE, &cs->flags);
	if (is_spread_slab(parent))
		set_bit(CS_SPREAD_SLAB, &cs->flags);
	/*
	 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
	 */
	if (cpuset_v2() && !is_sched_load_balance(parent))
		clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);

	cpuset_inc();

	spin_lock_irq(&callback_lock);
	if (is_in_v2_mode()) {
		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
		cs->effective_mems = parent->effective_mems;
	}
	spin_unlock_irq(&callback_lock);

	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
		goto out_unlock;

	/*
	 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
	 * set. This flag handling is implemented in cgroup core for
	 * historical reasons - the flag may be specified during mount.
	 *
	 * Currently, if any sibling cpusets have exclusive cpus or mem, we
	 * refuse to clone the configuration - thereby refusing to let the
	 * task enter the new cgroup, and as a result failing the
	 * sys_unshare() or clone() which initiated it. If this becomes a
	 * problem for some users who wish to allow that scenario, then this
	 * could be changed to grant parent->cpus_allowed-sibling_cpus_exclusive
	 * (and likewise for mems) to the new cgroup.
	 */
	rcu_read_lock();
	cpuset_for_each_child(tmp_cs, pos_css, parent) {
		if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
			rcu_read_unlock();
			goto out_unlock;
		}
	}
	rcu_read_unlock();

	spin_lock_irq(&callback_lock);
	cs->mems_allowed = parent->mems_allowed;
	cs->effective_mems = parent->mems_allowed;
	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
	spin_unlock_irq(&callback_lock);
out_unlock:
	cpuset_full_unlock();
	return 0;
}

/*
 * If the cpuset being removed has its flag 'sched_load_balance'
 * enabled, then simulate turning sched_load_balance off, which
 * will call rebuild_sched_domains_locked(). That is not needed
 * in the default hierarchy where only changes in partition
 * will cause repartitioning.
 */
static void cpuset_css_offline(struct cgroup_subsys_state *css)
{
	struct cpuset *cs = css_cs(css);

	cpuset_full_lock();
	if (!cpuset_v2() && is_sched_load_balance(cs))
		cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);

	cpuset_dec();
	cpuset_full_unlock();
}

/*
 * If a dying cpuset has the 'cpus.partition' enabled, turn it off by
 * changing it back to member to free its exclusive CPUs back to the pool to
 * be used by other online cpusets.
 */
static void cpuset_css_killed(struct cgroup_subsys_state *css)
{
	struct cpuset *cs = css_cs(css);

	cpuset_full_lock();
	/* Reset valid partition back to member */
	if (is_partition_valid(cs))
		update_prstate(cs, PRS_MEMBER);
	cpuset_full_unlock();
}

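/*
 * Release the cpuset that was set up in cpuset_css_alloc().
 */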
static void cpuset_css_free(struct cgroup_subsys_state *css)
{
	struct cpuset *cs = css_cs(css);

	free_cpuset(cs);
}

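/*
 * Called when the cpuset controller is (re)bound to a hierarchy. In v2
 * mode the top cpuset tracks all possible CPUs and memory nodes, while in
 * v1 mode it is narrowed back to the currently effective ones.
 */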
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
	mutex_lock(&cpuset_mutex);
	spin_lock_irq(&callback_lock);

	if (is_in_v2_mode()) {
		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
		cpumask_copy(top_cpuset.effective_xcpus, cpu_possible_mask);
		top_cpuset.mems_allowed = node_possible_map;
	} else {
		cpumask_copy(top_cpuset.cpus_allowed,
			     top_cpuset.effective_cpus);
		top_cpuset.mems_allowed = top_cpuset.effective_mems;
	}

	spin_unlock_irq(&callback_lock);
	mutex_unlock(&cpuset_mutex);
}

/*
 * In case the child is cloned into a cpuset different from its parent,
 * additional checks are done to see if the move is allowed.
 */
static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
{
	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
	bool same_cs;
	int ret;

	rcu_read_lock();
	same_cs = (cs == task_cs(current));
	rcu_read_unlock();

	if (same_cs)
		return 0;

	lockdep_assert_held(&cgroup_mutex);
	mutex_lock(&cpuset_mutex);

	/* Check to see if task is allowed in the cpuset */
	ret = cpuset_can_attach_check(cs);
	if (ret)
		goto out_unlock;

	ret = task_can_attach(task);
	if (ret)
		goto out_unlock;

	ret = security_task_setscheduler(task);
	if (ret)
		goto out_unlock;

	/*
	 * Mark attach is in progress. This makes validate_change() fail
	 * changes which zero cpus/mems_allowed.
	 */
	cs->attach_in_progress++;
out_unlock:
	mutex_unlock(&cpuset_mutex);
	return ret;
}

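/*
 * Undo the attach_in_progress marking done in cpuset_can_fork() when a
 * clone into a different cpuset is aborted.
 */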
static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
{
	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
	bool same_cs;

	rcu_read_lock();
	same_cs = (cs == task_cs(current));
	rcu_read_unlock();

	if (same_cs)
		return;

	dec_attach_in_progress(cs);
}

/*
 * Make sure the new task conforms to the current state of its parent,
 * which could have been changed by cpuset just after it inherits the
 * state from the parent and before it sits on the cgroup's task list.
 */
static void cpuset_fork(struct task_struct *task)
{
	struct cpuset *cs;
	bool same_cs;

	rcu_read_lock();
	cs = task_cs(task);
	same_cs = (cs == task_cs(current));
	rcu_read_unlock();

	if (same_cs) {
		if (cs == &top_cpuset)
			return;

		set_cpus_allowed_ptr(task, current->cpus_ptr);
		task->mems_allowed = current->mems_allowed;
		return;
	}

	/* CLONE_INTO_CGROUP */
	mutex_lock(&cpuset_mutex);
	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
	cpuset_attach_task(cs, task);

	dec_attach_in_progress_locked(cs);
	mutex_unlock(&cpuset_mutex);
}

struct cgroup_subsys cpuset_cgrp_subsys = {
	.css_alloc = cpuset_css_alloc,
	.css_online = cpuset_css_online,
	.css_offline = cpuset_css_offline,
	.css_killed = cpuset_css_killed,
	.css_free = cpuset_css_free,
	.can_attach = cpuset_can_attach,
	.cancel_attach = cpuset_cancel_attach,
	.attach = cpuset_attach,
	.bind = cpuset_bind,
	.can_fork = cpuset_can_fork,
	.cancel_fork = cpuset_cancel_fork,
	.fork = cpuset_fork,
#ifdef CONFIG_CPUSETS_V1
	.legacy_cftypes = cpuset1_files,
#endif
	.dfl_cftypes = dfl_files,
	.early_init = true,
	.threaded = true,
};

/**
 * cpuset_init - initialize cpusets at system boot
 *
 * Description: Initialize top_cpuset
 **/

int __init cpuset_init(void)
{
	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_xcpus, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&top_cpuset.exclusive_cpus, GFP_KERNEL));
	BUG_ON(!zalloc_cpumask_var(&subpartitions_cpus, GFP_KERNEL));
	BUG_ON(!zalloc_cpumask_var(&isolated_cpus, GFP_KERNEL));

	cpumask_setall(top_cpuset.cpus_allowed);
	nodes_setall(top_cpuset.mems_allowed);
	cpumask_setall(top_cpuset.effective_cpus);
	cpumask_setall(top_cpuset.effective_xcpus);
	cpumask_setall(top_cpuset.exclusive_cpus);
	nodes_setall(top_cpuset.effective_mems);

	fmeter_init(&top_cpuset.fmeter);

	BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));

	have_boot_isolcpus = housekeeping_enabled(HK_TYPE_DOMAIN);
	if (have_boot_isolcpus) {
		BUG_ON(!alloc_cpumask_var(&boot_hk_cpus, GFP_KERNEL));
		cpumask_copy(boot_hk_cpus, housekeeping_cpumask(HK_TYPE_DOMAIN));
		cpumask_andnot(isolated_cpus, cpu_possible_mask, boot_hk_cpus);
	}

	return 0;
}

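/*
 * Apply the new cpu/memory masks computed for a hotplug event to a
 * cpuset in v2 mode. An empty mask falls back to the parent's effective
 * mask, except that a valid partition root may keep an empty
 * effective_cpus.
 */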
static void
hotplug_update_tasks(struct cpuset *cs,
		     struct cpumask *new_cpus, nodemask_t *new_mems,
		     bool cpus_updated, bool mems_updated)
{
	/* A partition root is allowed to have empty effective cpus */
	if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
		cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
	if (nodes_empty(*new_mems))
		*new_mems = parent_cs(cs)->effective_mems;

	spin_lock_irq(&callback_lock);
	cpumask_copy(cs->effective_cpus, new_cpus);
	cs->effective_mems = *new_mems;
	spin_unlock_irq(&callback_lock);

	if (cpus_updated)
		cpuset_update_tasks_cpumask(cs, new_cpus);
	if (mems_updated)
		cpuset_update_tasks_nodemask(cs);
}

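/*
 * Request a rebuild of the sched domains at the end of the current
 * cpuset operation.
 */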
void cpuset_force_rebuild(void)
{
	force_sd_rebuild = true;
}

/**
 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
 * @cs: cpuset in interest
 * @tmp: the tmpmasks structure pointer
 *
 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
 * all its tasks are moved to the nearest ancestor with both resources.
 */
static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	static cpumask_t new_cpus;
	static nodemask_t new_mems;
	bool cpus_updated;
	bool mems_updated;
	bool remote;
	int partcmd = -1;
	struct cpuset *parent;
retry:
	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);

	mutex_lock(&cpuset_mutex);

	/*
	 * We have raced with task attaching. We wait until attaching
	 * is finished, so we won't attach a task to an empty cpuset.
	 */
	if (cs->attach_in_progress) {
		mutex_unlock(&cpuset_mutex);
		goto retry;
	}

	parent = parent_cs(cs);
	compute_effective_cpumask(&new_cpus, cs, parent);
	nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);

	if (!tmp || !cs->partition_root_state)
		goto update_tasks;

	/*
	 * Compute effective_cpus for valid partition root, may invalidate
	 * child partition roots if necessary.
	 */
	remote = is_remote_partition(cs);
	if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
		compute_partition_effective_cpumask(cs, &new_cpus);

	if (remote && (cpumask_empty(subpartitions_cpus) ||
		       (cpumask_empty(&new_cpus) &&
			partition_is_populated(cs, NULL)))) {
		cs->prs_err = PERR_HOTPLUG;
		remote_partition_disable(cs, tmp);
		compute_effective_cpumask(&new_cpus, cs, parent);
		remote = false;
	}

	/*
	 * Force the partition to become invalid if either one of
	 * the following conditions hold:
	 * 1) empty effective cpus but not valid empty partition.
	 * 2) parent is invalid or doesn't grant any cpus to child
	 *    partitions.
	 * 3) subpartitions_cpus is empty.
	 */
	if (is_local_partition(cs) &&
	    (!is_partition_valid(parent) ||
	     tasks_nocpu_error(parent, cs, &new_cpus) ||
	     cpumask_empty(subpartitions_cpus)))
		partcmd = partcmd_invalidate;
	/*
	 * On the other hand, an invalid partition root may be transitioned
	 * back to a regular one with a non-empty effective xcpus.
	 */
	else if (is_partition_valid(parent) && is_partition_invalid(cs) &&
		 !cpumask_empty(cs->effective_xcpus))
		partcmd = partcmd_update;

	if (partcmd >= 0) {
		update_parent_effective_cpumask(cs, partcmd, NULL, tmp);
		if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) {
			compute_partition_effective_cpumask(cs, &new_cpus);
			cpuset_force_rebuild();
		}
	}

update_tasks:
	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
	mems_updated = !nodes_equal(new_mems, cs->effective_mems);
	if (!cpus_updated && !mems_updated)
		goto unlock; /* Hotplug doesn't affect this cpuset */

	if (mems_updated)
		check_insane_mems_config(&new_mems);

	if (is_in_v2_mode())
		hotplug_update_tasks(cs, &new_cpus, &new_mems,
				     cpus_updated, mems_updated);
	else
		cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems,
					     cpus_updated, mems_updated);

unlock:
	mutex_unlock(&cpuset_mutex);
}

/**
 * cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset
 *
 * This function is called after either CPU or memory configuration has
 * changed and updates cpuset accordingly. The top_cpuset is always
 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
 * order to make cpusets transparent (of no effect) on systems that are
 * actively using CPU hotplug but making no active use of cpusets.
 *
 * Non-root cpusets are only affected by offlining. If any CPUs or memory
 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
 * all descendants.
 *
 * Note that CPU offlining during suspend is ignored. We don't modify
 * cpusets across suspend/resume cycles at all.
 *
 * CPU / memory hotplug is handled synchronously.
 */
static void cpuset_handle_hotplug(void)
{
	static cpumask_t new_cpus;
	static nodemask_t new_mems;
	bool cpus_updated, mems_updated;
	bool on_dfl = is_in_v2_mode();
	struct tmpmasks tmp, *ptmp = NULL;

	if (on_dfl && !alloc_tmpmasks(&tmp))
		ptmp = &tmp;

	lockdep_assert_cpus_held();
	mutex_lock(&cpuset_mutex);

	/* fetch the available cpus/mems and find out which changed how */
	cpumask_copy(&new_cpus, cpu_active_mask);
	new_mems = node_states[N_MEMORY];

	/*
	 * If subpartitions_cpus is populated, it is likely that the check
	 * below will produce a false positive on cpus_updated when the cpu
	 * list isn't changed. It is extra work, but it is better to be safe.
	 */
	cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus) ||
		       !cpumask_empty(subpartitions_cpus);
	mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);

	/* For v1, synchronize cpus_allowed to cpu_active_mask */
	if (cpus_updated) {
		cpuset_force_rebuild();
		spin_lock_irq(&callback_lock);
		if (!on_dfl)
			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
		/*
		 * Make sure that CPUs allocated to child partitions
		 * do not show up in effective_cpus. If no CPU is left,
		 * we clear the subpartitions_cpus & let the child partitions
		 * fight for the CPUs again.
		 */
		if (!cpumask_empty(subpartitions_cpus)) {
			if (cpumask_subset(&new_cpus, subpartitions_cpus)) {
				cpumask_clear(subpartitions_cpus);
			} else {
				cpumask_andnot(&new_cpus, &new_cpus,
					       subpartitions_cpus);
			}
		}
		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
		spin_unlock_irq(&callback_lock);
		/* we don't mess with cpumasks of tasks in top_cpuset */
	}

	/* synchronize mems_allowed to N_MEMORY */
	if (mems_updated) {
		spin_lock_irq(&callback_lock);
		if (!on_dfl)
			top_cpuset.mems_allowed = new_mems;
		top_cpuset.effective_mems = new_mems;
		spin_unlock_irq(&callback_lock);
		cpuset_update_tasks_nodemask(&top_cpuset);
	}

	mutex_unlock(&cpuset_mutex);

	/* if cpus or mems changed, we need to propagate to descendants */
	if (cpus_updated || mems_updated) {
		struct cpuset *cs;
		struct cgroup_subsys_state *pos_css;

		rcu_read_lock();
		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
			if (cs == &top_cpuset || !css_tryget_online(&cs->css))
				continue;
			rcu_read_unlock();

			cpuset_hotplug_update_tasks(cs, ptmp);

			rcu_read_lock();
			css_put(&cs->css);
		}
		rcu_read_unlock();
	}

	/* rebuild sched domains if necessary */
	if (force_sd_rebuild)
		rebuild_sched_domains_cpuslocked();

	free_tmpmasks(ptmp);
}

void cpuset_update_active_cpus(void)
{
	/*
	 * We're inside the cpu hotplug critical region which usually nests
	 * inside cgroup synchronization. Hotplug processing is done
	 * synchronously by cpuset_handle_hotplug().
	 */
	cpuset_handle_hotplug();
}

/*
 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
 * Call this routine anytime after node_states[N_MEMORY] changes.
 * See cpuset_update_active_cpus() for CPU hotplug handling.
 */
static int cpuset_track_online_nodes(struct notifier_block *self,
				     unsigned long action, void *arg)
{
	cpuset_handle_hotplug();
	return NOTIFY_OK;
}

/**
 * cpuset_init_smp - initialize cpus_allowed
 *
 * Description: Finish top cpuset after cpu, node maps are initialized
 */
void __init cpuset_init_smp(void)
{
	/*
	 * cpus_allowed/mems_allowed set to v2 values in the initial
	 * cpuset_bind() call will be reset to v1 values in another
	 * cpuset_bind() call when v1 cpuset is mounted.
	 */
	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;

	cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
	top_cpuset.effective_mems = node_states[N_MEMORY];

	hotplug_node_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);

	cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
	BUG_ON(!cpuset_migrate_mm_wq);
}

/*
 * Return cpus_allowed mask from a task's cpuset.
 */
static void __cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
{
	struct cpuset *cs;

	cs = task_cs(tsk);
	if (cs != &top_cpuset)
		guarantee_active_cpus(tsk, pmask);
	/*
	 * Tasks in the top cpuset won't get updates to their cpumasks
	 * when a hotplug online/offline event happens. So we include all
	 * offline cpus in the allowed cpu list.
	 */
	if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
		const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);

		/*
		 * We first exclude cpus allocated to partitions. If there is no
		 * allowable online cpu left, we fall back to all possible cpus.
		 */
		cpumask_andnot(pmask, possible_mask, subpartitions_cpus);
		if (!cpumask_intersects(pmask, cpu_active_mask))
			cpumask_copy(pmask, possible_mask);
	}
}

/**
 * cpuset_cpus_allowed_locked - return cpus_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
 *
 * Similar to cpuset_cpus_allowed() except that the caller must have acquired
 * cpuset_mutex.
 */
void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
{
	lockdep_assert_held(&cpuset_mutex);
	__cpuset_cpus_allowed_locked(tsk, pmask);
}

/**
 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
 *
 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
 * attached to the specified @tsk. Guaranteed to return some non-empty
 * subset of cpu_active_mask, even if this means going outside the
 * tasks cpuset, except when the task is in the top cpuset.
 **/

void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
	unsigned long flags;

	spin_lock_irqsave(&callback_lock, flags);
	__cpuset_cpus_allowed_locked(tsk, pmask);
	spin_unlock_irqrestore(&callback_lock, flags);
}

/**
 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
 * @tsk: pointer to task_struct with which the scheduler is struggling
 *
 * Description: In the case that the scheduler cannot find an allowed cpu in
 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
 * which will not contain a sane cpumask during cases such as cpu hotplugging.
 * This is the absolute last resort for the scheduler and it is only used if
 * _every_ other avenue has been traveled.
 *
 * Returns true if the affinity of @tsk was changed, false otherwise.
 **/

bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
	const struct cpumask *cs_mask;
	bool changed = false;

	rcu_read_lock();
	cs_mask = task_cs(tsk)->cpus_allowed;
	if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
		set_cpus_allowed_force(tsk, cs_mask);
		changed = true;
	}
	rcu_read_unlock();

	/*
	 * We own tsk->cpus_allowed, nobody can change it under us.
	 *
	 * But we used cs && cs->cpus_allowed lockless and thus can
	 * race with cgroup_attach_task() or update_cpumask() and get
	 * the wrong tsk->cpus_allowed. However, both cases imply the
	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
	 * which takes task_rq_lock().
	 *
	 * If we are called after it dropped the lock we must see all
	 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporarily
	 * set any mask even if it is not right from task_cs() pov,
	 * the pending set_cpus_allowed_ptr() will fix things.
	 *
	 * select_fallback_rq() will fix things up and set cpu_possible_mask
	 * if required.
	 */
	return changed;
}

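/*
 * Let the boot task allocate from any memory node until the real
 * mems_allowed is set up.
 */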
void __init cpuset_init_current_mems_allowed(void)
{
	nodes_setall(current->mems_allowed);
}

/**
 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
 *
 * Description: Returns the nodemask_t mems_allowed of the cpuset
 * attached to the specified @tsk. Guaranteed to return some non-empty
 * subset of node_states[N_MEMORY], even if this means going outside the
 * tasks cpuset.
 **/

nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
	nodemask_t mask;
	unsigned long flags;

	spin_lock_irqsave(&callback_lock, flags);
	guarantee_online_mems(task_cs(tsk), &mask);
	spin_unlock_irqrestore(&callback_lock, flags);

	return mask;
}

/**
 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
 * @nodemask: the nodemask to be checked
 *
 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
 */
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, current->mems_allowed);
}

/*
 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
 * mem_hardwall ancestor to the specified cpuset. Call holding
 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall
 * (an unusual configuration), then returns the root cpuset.
 */
static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
{
	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
		cs = parent_cs(cs);
	return cs;
}

/*
 * cpuset_current_node_allowed - Can current task allocate on a memory node?
 * @node: is this an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate. If @node is set in
 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
 * yes. If current has access to memory reserves as an oom victim, yes.
 * Otherwise, no.
 *
 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
 * and do not allow allocations outside the current tasks cpuset
 * unless the task has been OOM killed.
 * GFP_KERNEL allocations are not so marked, so can escape to the
 * nearest enclosing hardwalled ancestor cpuset.
 *
 * Scanning up parent cpusets requires callback_lock. The
 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
 * current tasks mems_allowed came up empty on the first pass over
 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
 * cpuset are short of memory, might require taking the callback_lock.
 *
 * The first call here from mm/page_alloc:get_page_from_freelist()
 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
 * so no allocation on a node outside the cpuset is allowed (unless
 * in interrupt, of course).
 *
 * The second pass through get_page_from_freelist() doesn't even call
 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
 * in alloc_flags. That logic and the checks below have the combined
 * effect that:
 * in_interrupt - any node ok (current task context irrelevant)
 * GFP_ATOMIC - any node ok
 * tsk_is_oom_victim - any node ok
 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
 * GFP_USER - only nodes in current tasks mems allowed ok.
 */
bool cpuset_current_node_allowed(int node, gfp_t gfp_mask)
{
	struct cpuset *cs; /* current cpuset ancestors */
	bool allowed; /* is allocation in zone z allowed? */
	unsigned long flags;

	if (in_interrupt())
		return true;
	if (node_isset(node, current->mems_allowed))
		return true;
	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(tsk_is_oom_victim(current)))
		return true;
	if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
		return false;

	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return true;

	/* Not hardwall and node outside mems_allowed: scan up cpusets */
	spin_lock_irqsave(&callback_lock, flags);

	cs = nearest_hardwall_ancestor(task_cs(current));
	allowed = node_isset(node, cs->mems_allowed);

	spin_unlock_irqrestore(&callback_lock, flags);
	return allowed;
}

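/*
 * cpuset_node_allowed - is memory node @nid in the effective mems of the
 * cpuset governing @cgroup? Intended for reclaim/migration paths; see the
 * locking notes in the body.
 */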
bool cpuset_node_allowed(struct cgroup *cgroup, int nid)
{
	struct cgroup_subsys_state *css;
	struct cpuset *cs;
	bool allowed;

	/*
	 * In v1, mem_cgroup and cpuset are unlikely in the same hierarchy
	 * and mems_allowed is likely to be empty even if we could get to it,
	 * so return true to avoid taking a global lock on the empty check.
	 */
	if (!cpuset_v2())
		return true;

	css = cgroup_get_e_css(cgroup, &cpuset_cgrp_subsys);
	if (!css)
		return true;

	/*
	 * Normally, accessing effective_mems would require the cpuset_mutex
	 * or callback_lock - but node_isset is atomic and the reference
	 * taken via cgroup_get_e_css is sufficient to protect css.
	 *
	 * Since this interface is intended for use by migration paths, we
	 * relax locking here to avoid taking global locks - while accepting
	 * there may be rare scenarios where the result may be inaccurate.
	 *
	 * Reclaim and migration are subject to these same race conditions, and
	 * cannot make strong isolation guarantees, so this is acceptable.
	 */
	cs = container_of(css, struct cpuset, css);
	allowed = node_isset(nid, cs->effective_mems);
	css_put(css);
	return allowed;
}

/**
 * cpuset_spread_node() - On which node to begin search for a page
 * @rotor: round robin rotor
 *
 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
 * tasks in a cpuset with is_spread_page or is_spread_slab set),
 * and if the memory allocation used cpuset_mem_spread_node()
 * to determine on which node to start looking, as it will for
 * certain page cache or slab cache pages such as used for file
 * system buffers and inode caches, then instead of starting on the
 * local node to look for a free page, rather spread the starting
 * node around the tasks mems_allowed nodes.
 *
 * We don't have to worry about the returned node being offline
 * because "it can't happen", and even if it did, it would be ok.
 *
 * The routines calling guarantee_online_mems() are careful to
 * only set nodes in task->mems_allowed that are online. So it
 * should not be possible for the following code to return an
 * offline node. But if it did, that would be ok, as this routine
 * is not returning the node where the allocation must be, only
 * the node where the search should start. The zonelist passed to
 * __alloc_pages() will include all nodes. If the slab allocator
 * is passed an offline node, it will fall back to the local node.
 * See kmem_cache_alloc_node().
 */
static int cpuset_spread_node(int *rotor)
{
	return *rotor = next_node_in(*rotor, current->mems_allowed);
}

/**
 * cpuset_mem_spread_node() - On which node to begin search for a file page
 */
int cpuset_mem_spread_node(void)
{
	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
		current->cpuset_mem_spread_rotor =
			node_random(&current->mems_allowed);

	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
}

/**
 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
 * @tsk1: pointer to task_struct of some task.
 * @tsk2: pointer to task_struct of some other task.
 *
 * Description: Return true if @tsk1's mems_allowed intersects the
 * mems_allowed of @tsk2. Used by the OOM killer to determine if
 * one of the task's memory usage might impact the memory available
 * to the other.
 **/

int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
				   const struct task_struct *tsk2)
{
	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}

/**
 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
 *
 * Description: Prints current's name, cpuset name, and cached copy of its
 * mems_allowed to the kernel log.
 */
void cpuset_print_current_mems_allowed(void)
{
	struct cgroup *cgrp;

	rcu_read_lock();

	cgrp = task_cs(current)->css.cgroup;
	pr_cont(",cpuset=");
	pr_cont_cgroup_name(cgrp);
	pr_cont(",mems_allowed=%*pbl",
		nodemask_pr_args(&current->mems_allowed));

	rcu_read_unlock();
}

/* Display task mems_allowed in /proc/<pid>/status file. */
void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
{
	seq_printf(m, "Mems_allowed:\t%*pb\n",
		   nodemask_pr_args(&task->mems_allowed));
	seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
		   nodemask_pr_args(&task->mems_allowed));
}
