/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __CPUSET_INTERNAL_H
#define __CPUSET_INTERNAL_H

#include <linux/cgroup.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/spinlock.h>
#include <linux/union_find.h>

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time64_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

/*
 * Invalid partition error code
 */
enum prs_errcode {
	PERR_NONE = 0,
	PERR_INVCPUS,
	PERR_INVPARENT,
	PERR_NOTPART,
	PERR_NOTEXCL,
	PERR_NOCPUS,
	PERR_HOTPLUG,
	PERR_CPUSEMPTY,
	PERR_HKEEPING,
	PERR_ACCESS,
	PERR_REMOTE,
};

/* bits in struct cpuset flags field */
typedef enum {
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* The various types of files and directories in a cpuset file system */

typedef enum {
	FILE_MEMORY_MIGRATE,
	FILE_CPULIST,
	FILE_MEMLIST,
	FILE_EFFECTIVE_CPULIST,
	FILE_EFFECTIVE_MEMLIST,
	FILE_SUBPARTS_CPULIST,
	FILE_EXCLUSIVE_CPULIST,
	FILE_EFFECTIVE_XCPULIST,
	FILE_ISOLATED_CPULIST,
	FILE_CPU_EXCLUSIVE,
	FILE_MEM_EXCLUSIVE,
	FILE_MEM_HARDWALL,
	FILE_SCHED_LOAD_BALANCE,
	FILE_PARTITION_ROOT,
	FILE_SCHED_RELAX_DOMAIN_LEVEL,
	FILE_MEMORY_PRESSURE_ENABLED,
	FILE_MEMORY_PRESSURE,
	FILE_SPREAD_PAGE,
	FILE_SPREAD_SLAB,
} cpuset_filetype_t;

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * On default hierarchy:
	 *
	 * The user-configured masks can only be changed by writing to
	 * cpuset.cpus and cpuset.mems, and won't be limited by the
	 * parent masks.
	 *
	 * The effective masks is the real masks that apply to the tasks
	 * in the cpuset. They may be changed if the configured masks are
	 * changed or hotplug happens.
	 *
	 * effective_mask == configured_mask & parent's effective_mask,
	 * and if it ends up empty, it will inherit the parent's mask.
	 *
	 *
	 * On legacy hierarchy:
	 *
	 * The user-configured masks are always the same with effective masks.
	 */

	/* user-configured CPUs and Memory Nodes allow to tasks */
	cpumask_var_t cpus_allowed;
	nodemask_t mems_allowed;

	/* effective CPUs and Memory Nodes allow to tasks */
	cpumask_var_t effective_cpus;
	nodemask_t effective_mems;

	/*
	 * Exclusive CPUs dedicated to current cgroup (default hierarchy only)
	 *
	 * The effective_cpus of a valid partition root comes solely from its
	 * effective_xcpus and some of the effective_xcpus may be distributed
	 * to sub-partitions below & hence excluded from its effective_cpus.
	 * For a valid partition root, its effective_cpus have no relationship
	 * with cpus_allowed unless its exclusive_cpus isn't set.
	 *
	 * This value will only be set if either exclusive_cpus is set or
	 * when this cpuset becomes a local partition root.
	 */
	cpumask_var_t effective_xcpus;

	/*
	 * Exclusive CPUs as requested by the user (default hierarchy only)
	 *
	 * Its value is independent of cpus_allowed and designates the set of
	 * CPUs that can be granted to the current cpuset or its children when
	 * it becomes a valid partition root. The effective set of exclusive
	 * CPUs granted (effective_xcpus) depends on whether those exclusive
	 * CPUs are passed down by its ancestors and not yet taken up by
	 * another sibling partition root along the way.
	 *
	 * If its value isn't set, it defaults to cpus_allowed.
	 */
	cpumask_var_t exclusive_cpus;

	/*
	 * This is old Memory Nodes tasks took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset. Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* for custom sched domain */
	int relax_domain_level;

	/* partition root state */
	int partition_root_state;

	/*
	 * Whether cpuset is a remote partition.
	 * It used to be a list anchoring all remote partitions - we can switch
	 * back to a list if we need to iterate over the remote partitions.
	 */
	bool remote_partition;

	/*
	 * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
	 * know when to rebuild associated root domain bandwidth information.
	 */
	int nr_deadline_tasks;
	int nr_migrate_dl_tasks;
	u64 sum_migrate_dl_bw;

	/* Invalid partition error code, not lock protected */
	enum prs_errcode prs_err;

	/* Handle for cpuset.cpus.partition */
	struct cgroup_file partition_file;

	/* Used to merge intersecting subsets for generate_sched_domains */
	struct uf_node node;
};

/* Map a cgroup_subsys_state back to its containing cpuset; NULL-safe. */
static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cpuset, css) : NULL;
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return css_cs(task_css(task, cpuset_cgrp_id));
}

/* Parent cpuset in the hierarchy; NULL for the top cpuset. */
static inline struct cpuset *parent_cs(struct cpuset *cs)
{
	return css_cs(cs->css.parent);
}

/* convenient tests for these bits */
static inline bool is_cpuset_online(struct cpuset *cs)
{
	return css_is_online(&cs->css) && !css_is_dying(&cs->css);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_css: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs. Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
	css_for_each_child((pos_css), &(parent_cs)->css)		\
		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @root_cs: target cpuset to walk ancestor of
 *
 * Walk @des_cs through the online descendants of @root_cs. Must be used
 * with RCU read locked. The caller may modify @pos_css by calling
 * css_rightmost_descendant() to skip subtree. @root_cs is included in the
 * iteration and the first node to be visited.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
	css_for_each_descendant_pre((pos_css), &(root_cs)->css)		\
		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))

void rebuild_sched_domains_locked(void);
void cpuset_callback_lock_irq(void);
void cpuset_callback_unlock_irq(void);
void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus);
void cpuset_update_tasks_nodemask(struct cpuset *cs);
int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs, int turning_on);
ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
			     char *buf, size_t nbytes, loff_t off);
int cpuset_common_seq_show(struct seq_file *sf, void *v);
void cpuset_full_lock(void);
void cpuset_full_unlock(void);

/*
 * cpuset-v1.c
 */
#ifdef CONFIG_CPUSETS_V1
extern struct cftype cpuset1_files[];
void fmeter_init(struct fmeter *fmp);
void cpuset1_update_task_spread_flags(struct cpuset *cs,
					struct task_struct *tsk);
void cpuset1_update_tasks_flags(struct cpuset *cs);
void cpuset1_hotplug_update_tasks(struct cpuset *cs,
			    struct cpumask *new_cpus, nodemask_t *new_mems,
			    bool cpus_updated, bool mems_updated);
int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial);
#else
/* no-op stubs so callers need no #ifdef when v1 support is compiled out */
static inline void fmeter_init(struct fmeter *fmp) {}
static inline void cpuset1_update_task_spread_flags(struct cpuset *cs,
				struct task_struct *tsk) {}
static inline void cpuset1_update_tasks_flags(struct cpuset *cs) {}
static inline void cpuset1_hotplug_update_tasks(struct cpuset *cs,
			struct cpumask *new_cpus, nodemask_t *new_mems,
			bool cpus_updated, bool mems_updated) {}
static inline int cpuset1_validate_change(struct cpuset *cur,
				struct cpuset *trial) { return 0; }
#endif /* CONFIG_CPUSETS_V1 */

#endif /* __CPUSET_INTERNAL_H */