4 * Processor and Memory placement constraints for sets of tasks.
6 * Copyright (C) 2003 BULL SA.
7 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
8 * Copyright (C) 2006 Google, Inc
10 * Portions derived from Patrick Mochel's sysfs code.
11 * sysfs is Copyright (c) 2001-3 Patrick Mochel
13 * 2003-10-10 Written by Simon Derr.
14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson.
16 * 2006 Rework by Paul Menage to use generic cgroups
17 * 2008 Rework of the scheduler domains and CPU hotplug handling
20 * This file is subject to the terms and conditions of the GNU General Public
21 * License. See the file COPYING in the main directory of the Linux
22 * distribution for more details.
25 #include <linux/cpu.h>
26 #include <linux/cpumask.h>
27 #include <linux/cpuset.h>
28 #include <linux/err.h>
29 #include <linux/errno.h>
30 #include <linux/file.h>
32 #include <linux/init.h>
33 #include <linux/interrupt.h>
34 #include <linux/kernel.h>
35 #include <linux/kmod.h>
36 #include <linux/kthread.h>
37 #include <linux/list.h>
38 #include <linux/mempolicy.h>
40 #include <linux/memory.h>
41 #include <linux/export.h>
42 #include <linux/mount.h>
43 #include <linux/fs_context.h>
44 #include <linux/namei.h>
45 #include <linux/pagemap.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rcupdate.h>
48 #include <linux/sched.h>
49 #include <linux/sched/deadline.h>
50 #include <linux/sched/mm.h>
51 #include <linux/sched/task.h>
52 #include <linux/seq_file.h>
53 #include <linux/security.h>
54 #include <linux/slab.h>
55 #include <linux/spinlock.h>
56 #include <linux/stat.h>
57 #include <linux/string.h>
58 #include <linux/time.h>
59 #include <linux/time64.h>
60 #include <linux/backing-dev.h>
61 #include <linux/sort.h>
62 #include <linux/oom.h>
63 #include <linux/sched/isolation.h>
64 #include <linux/uaccess.h>
65 #include <linux/atomic.h>
66 #include <linux/mutex.h>
67 #include <linux/cgroup.h>
68 #include <linux/wait.h>
70 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
71 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
74 * There could be abnormal cpuset configurations for cpu or memory
75 * node binding; add this key to provide a quick, low-cost judgment
78 DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
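/*
 * Minimal sketch (not part of the original file): static keys like the
 * ones above are normally consumed on fast paths with
 * static_branch_unlikely(), so the check is a patched-out branch until
 * the key is enabled. The helper name below is illustrative only; the
 * real accessors live in include/linux/cpuset.h.
 */
static inline bool example_insane_config_enabled(void)
{
	return static_branch_unlikely(&cpusets_insane_config_key);
}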
80 /* See "Frequency meter" comments, below. */
83 int cnt; /* unprocessed events count */
84 int val; /* most recent output value */
85 time64_t time; /* clock (secs) when val computed */
86 spinlock_t lock; /* guards read or write of above */
90 * Invalid partition error code
103 static const char * const perr_strings[] = {
104 [PERR_INVCPUS] = "Invalid cpu list in cpuset.cpus",
105 [PERR_INVPARENT] = "Parent is an invalid partition root",
106 [PERR_NOTPART] = "Parent is not a partition root",
107 [PERR_NOTEXCL] = "Cpu list in cpuset.cpus not exclusive",
108 [PERR_NOCPUS] = "Parent unable to distribute cpu downstream",
109 [PERR_HOTPLUG] = "No cpu available due to hotplug",
110 [PERR_CPUSEMPTY] = "cpuset.cpus is empty",
114 struct cgroup_subsys_state css;
116 unsigned long flags; /* "unsigned long" so bitops work */
119 * On default hierarchy:
121 * The user-configured masks can only be changed by writing to
122 * cpuset.cpus and cpuset.mems, and won't be limited by the
125 * The effective masks are the real masks that apply to the tasks
126 * in the cpuset. They may be changed if the configured masks are
127 * changed or hotplug happens.
129 * effective_mask == configured_mask & parent's effective_mask,
130 * and if it ends up empty, it will inherit the parent's mask.
133 * On legacy hierarchy:
135 * The user-configured masks are always the same as the effective masks.
138 /* user-configured CPUs and Memory Nodes allowed to tasks */
139 cpumask_var_t cpus_allowed;
140 nodemask_t mems_allowed;
142 /* effective CPUs and Memory Nodes allowed to tasks */
143 cpumask_var_t effective_cpus;
144 nodemask_t effective_mems;
147 * CPUs allocated to child sub-partitions (default hierarchy only)
148 * - CPUs granted by the parent = effective_cpus U subparts_cpus
149 * - effective_cpus and subparts_cpus are mutually exclusive.
151 * effective_cpus contains only onlined CPUs, but subparts_cpus
152 * may have offlined ones.
154 cpumask_var_t subparts_cpus;
157 * These are the old Memory Nodes the tasks took on.
159 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
160 * - A new cpuset's old_mems_allowed is initialized when some
161 * task is moved into it.
162 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
163 * cpuset.mems_allowed and have tasks' nodemask updated, and
164 * then old_mems_allowed is updated to mems_allowed.
166 nodemask_t old_mems_allowed;
168 struct fmeter fmeter; /* memory_pressure filter */
171 * Tasks are being attached to this cpuset. Used to prevent
172 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
174 int attach_in_progress;
176 /* partition number for rebuild_sched_domains() */
179 /* for custom sched domain */
180 int relax_domain_level;
182 /* number of CPUs in subparts_cpus */
183 int nr_subparts_cpus;
185 /* partition root state */
186 int partition_root_state;
189 * Default hierarchy only:
190 * use_parent_ecpus - set if using parent's effective_cpus
191 * child_ecpus_count - # of children with use_parent_ecpus set
193 int use_parent_ecpus;
194 int child_ecpus_count;
196 /* Invalid partition error code, not lock protected */
197 enum prs_errcode prs_err;
199 /* Handle for cpuset.cpus.partition */
200 struct cgroup_file partition_file;
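/*
 * Illustrative sketch (not part of the original file) of the
 * default-hierarchy rule documented above: the effective mask is the
 * configured mask restricted to the parent's effective mask, and falls
 * back to the parent's mask when the intersection is empty. The real
 * computation, including subpartition handling, is done in
 * compute_effective_cpumask() and update_cpumasks_hier() further below.
 */
static inline void example_effective_cpus(struct cpumask *effective,
					  const struct cpumask *configured,
					  const struct cpumask *parent_effective)
{
	if (!cpumask_and(effective, configured, parent_effective))
		cpumask_copy(effective, parent_effective);
}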
204 * Partition root states:
206 * 0 - member (not a partition root)
207 * 1 - partition root
208 * 2 - partition root without load balancing (isolated)
209 * -1 - invalid partition root
210 * -2 - invalid isolated partition root
214 #define PRS_ISOLATED 2
215 #define PRS_INVALID_ROOT -1
216 #define PRS_INVALID_ISOLATED -2
218 static inline bool is_prs_invalid(int prs_state)
220 return prs_state < 0;
224 * Temporary cpumasks for working with partitions that are passed among
225 * functions to avoid memory allocation in inner functions.
228 cpumask_var_t addmask, delmask; /* For partition root */
229 cpumask_var_t new_cpus; /* For update_cpumasks_hier() */
232 static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
234 return css ? container_of(css, struct cpuset, css) : NULL;
237 /* Retrieve the cpuset for a task */
238 static inline struct cpuset *task_cs(struct task_struct *task)
240 return css_cs(task_css(task, cpuset_cgrp_id));
243 static inline struct cpuset *parent_cs(struct cpuset *cs)
245 return css_cs(cs->css.parent);
248 /* bits in struct cpuset flags field */
255 CS_SCHED_LOAD_BALANCE,
260 /* convenient tests for these bits */
261 static inline bool is_cpuset_online(struct cpuset *cs)
263 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
266 static inline int is_cpu_exclusive(const struct cpuset *cs)
268 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
271 static inline int is_mem_exclusive(const struct cpuset *cs)
273 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
276 static inline int is_mem_hardwall(const struct cpuset *cs)
278 return test_bit(CS_MEM_HARDWALL, &cs->flags);
281 static inline int is_sched_load_balance(const struct cpuset *cs)
283 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
286 static inline int is_memory_migrate(const struct cpuset *cs)
288 return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
291 static inline int is_spread_page(const struct cpuset *cs)
293 return test_bit(CS_SPREAD_PAGE, &cs->flags);
296 static inline int is_spread_slab(const struct cpuset *cs)
298 return test_bit(CS_SPREAD_SLAB, &cs->flags);
301 static inline int is_partition_valid(const struct cpuset *cs)
303 return cs->partition_root_state > 0;
306 static inline int is_partition_invalid(const struct cpuset *cs)
308 return cs->partition_root_state < 0;
312 * Callers should hold callback_lock to modify partition_root_state.
314 static inline void make_partition_invalid(struct cpuset *cs)
316 if (is_partition_valid(cs))
317 cs->partition_root_state = -cs->partition_root_state;
321 * Send a notification event whenever partition_root_state changes.
323 static inline void notify_partition_change(struct cpuset *cs, int old_prs)
325 if (old_prs == cs->partition_root_state)
327 cgroup_file_notify(&cs->partition_file);
329 /* Reset prs_err if not invalid */
330 if (is_partition_valid(cs))
331 WRITE_ONCE(cs->prs_err, PERR_NONE);
334 static struct cpuset top_cpuset = {
335 .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
336 (1 << CS_MEM_EXCLUSIVE)),
337 .partition_root_state = PRS_ROOT,
341 * cpuset_for_each_child - traverse online children of a cpuset
342 * @child_cs: loop cursor pointing to the current child
343 * @pos_css: used for iteration
344 * @parent_cs: target cpuset to walk children of
346 * Walk @child_cs through the online children of @parent_cs. Must be used
347 * with RCU read locked.
349 #define cpuset_for_each_child(child_cs, pos_css, parent_cs) \
350 css_for_each_child((pos_css), &(parent_cs)->css) \
351 if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
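/*
 * Usage example (illustrative only, not part of the original file): a
 * typical walk over the online children of a cpuset with the macro above.
 * The RCU read lock is held for the duration of the walk; the body just
 * counts children.
 */
static int example_count_online_children(struct cpuset *parent)
{
	struct cgroup_subsys_state *pos_css;
	struct cpuset *child;
	int n = 0;

	rcu_read_lock();
	cpuset_for_each_child(child, pos_css, parent)
		n++;
	rcu_read_unlock();

	return n;
}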
354 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
355 * @des_cs: loop cursor pointing to the current descendant
356 * @pos_css: used for iteration
357 * @root_cs: target cpuset to walk descendants of
359 * Walk @des_cs through the online descendants of @root_cs. Must be used
360 * with RCU read locked. The caller may modify @pos_css by calling
361 * css_rightmost_descendant() to skip subtree. @root_cs is included in the
362 * iteration and the first node to be visited.
364 #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \
365 css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
366 if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
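/*
 * Usage example (illustrative only, not part of the original file): a
 * pre-order descendant walk that prunes whole subtrees, as described in
 * the comment above. The subtree is skipped by moving @pos_css to the
 * rightmost descendant; the emptiness test is just a stand-in predicate.
 */
static void example_walk_descendants(struct cpuset *root)
{
	struct cgroup_subsys_state *pos_css;
	struct cpuset *cp;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, root) {
		if (cpumask_empty(cp->cpus_allowed)) {
			/* nothing of interest below @cp, skip its subtree */
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}
		/* ... process @cp ... */
	}
	rcu_read_unlock();
}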
369 * There are two global locks guarding cpuset structures - cpuset_rwsem and
370 * callback_lock. We also require taking task_lock() when dereferencing a
371 * task's cpuset pointer. See "The task_lock() exception", at the end of this
372 * comment. The cpuset code uses only cpuset_rwsem write lock. Other
373 * kernel subsystems can use cpuset_read_lock()/cpuset_read_unlock() to
374 * prevent change to cpuset structures.
376 * A task must hold both locks to modify cpusets. If a task holds
377 * cpuset_rwsem, it blocks others wanting that rwsem, ensuring that it
378 * is the only task able to also acquire callback_lock and be able to
379 * modify cpusets. It can perform various checks on the cpuset structure
380 * first, knowing nothing will change. It can also allocate memory while
381 * just holding cpuset_rwsem. While it is performing these checks, various
382 * callback routines can briefly acquire callback_lock to query cpusets.
383 * Once it is ready to make the changes, it takes callback_lock, blocking
386 * Calls to the kernel memory allocator can not be made while holding
387 * callback_lock, as that would risk double tripping on callback_lock
388 * from one of the callbacks into the cpuset code from within
391 * If a task is only holding callback_lock, then it has read-only
394 * Now, the task_struct fields mems_allowed and mempolicy may be changed
395 * by another task; we use alloc_lock in the task_struct to protect
398 * The cpuset_common_file_read() handlers only hold callback_lock across
399 * small pieces of code, such as when reading out possibly multi-word
400 * cpumasks and nodemasks.
402 * Accessing a task's cpuset should be done in accordance with the
403 * guidelines for accessing subsystem state in kernel/cgroup.c
406 DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem);
408 void cpuset_read_lock(void)
410 percpu_down_read(&cpuset_rwsem);
413 void cpuset_read_unlock(void)
415 percpu_up_read(&cpuset_rwsem);
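/*
 * Example (illustrative only, not part of the original file): how another
 * kernel subsystem would use the read-side helpers above to keep cpuset
 * structures stable across a short critical section, per the locking
 * comment earlier in this file. The work done inside is a placeholder.
 */
static void example_read_side_user(void)
{
	cpuset_read_lock();
	/* cpuset topology cannot change while the read lock is held */
	cpuset_read_unlock();
}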
418 static DEFINE_SPINLOCK(callback_lock);
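/*
 * Example (illustrative only, not part of the original file): a
 * query-side helper of the kind the locking comment describes, holding
 * callback_lock only across a small copy. The irqsave form is used
 * because callback routines may run in interrupt-sensitive contexts.
 */
static void example_query_effective_cpus(struct cpuset *cs,
					 struct cpumask *out)
{
	unsigned long flags;

	spin_lock_irqsave(&callback_lock, flags);
	cpumask_copy(out, cs->effective_cpus);
	spin_unlock_irqrestore(&callback_lock, flags);
}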
420 static struct workqueue_struct *cpuset_migrate_mm_wq;
423 * CPU / memory hotplug is handled asynchronously.
425 static void cpuset_hotplug_workfn(struct work_struct *work);
426 static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
428 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
430 static inline void check_insane_mems_config(nodemask_t *nodes)
432 if (!cpusets_insane_config() &&
433 movable_only_nodes(nodes)) {
434 static_branch_enable(&cpusets_insane_config_key);
435 pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
436 "Cpuset allocations might fail even with a lot of memory available.\n",
437 nodemask_pr_args(nodes));
442 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
443 * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
444 * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
445 * With v2 behavior, "cpus" and "mems" are always what the users have
446 * requested and won't be changed by hotplug events. Only the effective
447 * cpus or mems will be affected.
449 static inline bool is_in_v2_mode(void)
451 return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
452 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
456 * partition_is_populated - check if partition has tasks
457 * @cs: partition root to be checked
458 * @excluded_child: a child cpuset to be excluded in task checking
459 * Return: true if there are tasks, false otherwise
461 * It is assumed that @cs is a valid partition root. @excluded_child should
462 * be non-NULL when this cpuset is going to become a partition itself.
464 static inline bool partition_is_populated(struct cpuset *cs,
465 struct cpuset *excluded_child)
467 struct cgroup_subsys_state *css;
468 struct cpuset *child;
470 if (cs->css.cgroup->nr_populated_csets)
472 if (!excluded_child && !cs->nr_subparts_cpus)
473 return cgroup_is_populated(cs->css.cgroup);
476 cpuset_for_each_child(child, css, cs) {
477 if (child == excluded_child)
479 if (is_partition_valid(child))
481 if (cgroup_is_populated(child->css.cgroup)) {
491 * Return in pmask the portion of a task's cpuset's cpus_allowed that
492 * are online and are capable of running the task. If none are found,
493 * walk up the cpuset hierarchy until we find one that does have some
496 * One way or another, we guarantee to return some non-empty subset
497 * of cpu_online_mask.
499 * Call with callback_lock or cpuset_rwsem held.
501 static void guarantee_online_cpus(struct task_struct *tsk,
502 struct cpumask *pmask)
504 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
507 if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
508 cpumask_copy(pmask, cpu_online_mask);
513 while (!cpumask_intersects(cs->effective_cpus, pmask)) {
517 * The top cpuset doesn't have any online cpu as a
518 * consequence of a race between cpuset_hotplug_work
519 * and cpu hotplug notifier. But we know the top
520 * cpuset's effective_cpus is on its way to be
521 * identical to cpu_online_mask.
526 cpumask_and(pmask, pmask, cs->effective_cpus);
533 * Return in *pmask the portion of a cpuset's mems_allowed that
534 * are online, with memory. If none are online with memory, walk
535 * up the cpuset hierarchy until we find one that does have some
536 * online mems. The top cpuset always has some mems online.
538 * One way or another, we guarantee to return some non-empty subset
539 * of node_states[N_MEMORY].
541 * Call with callback_lock or cpuset_rwsem held.
543 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
545 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
547 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
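/*
 * Illustrative sketch (not part of the original file): the "walk up until
 * something usable is found" pattern used by the guarantee_online_*()
 * helpers above. The walk stops at the first ancestor whose effective
 * mems intersect the nodes that currently have memory; top_cpuset always
 * terminates it.
 */
static struct cpuset *example_nearest_ancestor_with_mems(struct cpuset *cs)
{
	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
		cs = parent_cs(cs);

	return cs;
}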
551 * update task's spread flag if cpuset's page/slab spread flag is set
553 * Call with callback_lock or cpuset_rwsem held. The check can be skipped
554 * if on default hierarchy.
556 static void cpuset_update_task_spread_flags(struct cpuset *cs,
557 struct task_struct *tsk)
559 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
562 if (is_spread_page(cs))
563 task_set_spread_page(tsk);
565 task_clear_spread_page(tsk);
567 if (is_spread_slab(cs))
568 task_set_spread_slab(tsk);
570 task_clear_spread_slab(tsk);
574 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
576 * One cpuset is a subset of another if all its allowed CPUs and
577 * Memory Nodes are a subset of the other, and its exclusive flags
578 * are only set if the other's are set. Call holding cpuset_rwsem.
581 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
583 return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
584 nodes_subset(p->mems_allowed, q->mems_allowed) &&
585 is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
586 is_mem_exclusive(p) <= is_mem_exclusive(q);
590 * alloc_cpumasks - allocate three cpumasks for cpuset
591 * @cs: the cpuset that has cpumasks to be allocated.
592 * @tmp: the tmpmasks structure pointer
593 * Return: 0 if successful, -ENOMEM otherwise.
595 * Only one of the two input arguments should be non-NULL.
597 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
599 cpumask_var_t *pmask1, *pmask2, *pmask3;
602 pmask1 = &cs->cpus_allowed;
603 pmask2 = &cs->effective_cpus;
604 pmask3 = &cs->subparts_cpus;
606 pmask1 = &tmp->new_cpus;
607 pmask2 = &tmp->addmask;
608 pmask3 = &tmp->delmask;
611 if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
614 if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
617 if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
623 free_cpumask_var(*pmask2);
625 free_cpumask_var(*pmask1);
630 * free_cpumasks - free cpumasks in a tmpmasks structure
631 * @cs: the cpuset that has cpumasks to be freed.
632 * @tmp: the tmpmasks structure pointer
634 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
637 free_cpumask_var(cs->cpus_allowed);
638 free_cpumask_var(cs->effective_cpus);
639 free_cpumask_var(cs->subparts_cpus);
642 free_cpumask_var(tmp->new_cpus);
643 free_cpumask_var(tmp->addmask);
644 free_cpumask_var(tmp->delmask);
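/*
 * Example (illustrative only, not part of the original file): the usual
 * pairing of the two helpers above for a scratch tmpmasks structure, as
 * later functions in this file do when they need temporary cpumasks.
 * Error handling is reduced to a bare return for brevity.
 */
static int example_with_tmpmasks(void)
{
	struct tmpmasks tmp;

	if (alloc_cpumasks(NULL, &tmp))
		return -ENOMEM;

	/* ... use tmp.new_cpus, tmp.addmask and tmp.delmask ... */

	free_cpumasks(NULL, &tmp);
	return 0;
}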
649 * alloc_trial_cpuset - allocate a trial cpuset
650 * @cs: the cpuset that the trial cpuset duplicates
652 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
654 struct cpuset *trial;
656 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
660 if (alloc_cpumasks(trial, NULL)) {
665 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
666 cpumask_copy(trial->effective_cpus, cs->effective_cpus);
671 * free_cpuset - free the cpuset
672 * @cs: the cpuset to be freed
674 static inline void free_cpuset(struct cpuset *cs)
676 free_cpumasks(cs, NULL);
681 * validate_change_legacy() - Validate conditions specific to legacy (v1)
684 static int validate_change_legacy(struct cpuset *cur, struct cpuset *trial)
686 struct cgroup_subsys_state *css;
687 struct cpuset *c, *par;
690 WARN_ON_ONCE(!rcu_read_lock_held());
692 /* Each of our child cpusets must be a subset of us */
694 cpuset_for_each_child(c, css, cur)
695 if (!is_cpuset_subset(c, trial))
698 /* On legacy hierarchy, we must be a subset of our parent cpuset. */
700 par = parent_cs(cur);
701 if (par && !is_cpuset_subset(trial, par))
710 * validate_change() - Used to validate that any proposed cpuset change
711 * follows the structural rules for cpusets.
713 * If we replaced the flag and mask values of the current cpuset
714 * (cur) with those values in the trial cpuset (trial), would
715 * our various subset and exclusive rules still be valid? Presumes
718 * 'cur' is the address of an actual, in-use cpuset. Operations
719 * such as list traversal that depend on the actual address of the
720 * cpuset in the list must use cur below, not trial.
722 * 'trial' is the address of bulk structure copy of cur, with
723 * perhaps one or more of the fields cpus_allowed, mems_allowed,
724 * or flags changed to new, trial values.
726 * Return 0 if valid, -errno if not.
729 static int validate_change(struct cpuset *cur, struct cpuset *trial)
731 struct cgroup_subsys_state *css;
732 struct cpuset *c, *par;
737 if (!is_in_v2_mode())
738 ret = validate_change_legacy(cur, trial);
742 /* Remaining checks don't apply to root cpuset */
743 if (cur == &top_cpuset)
746 par = parent_cs(cur);
749 * Cpusets with tasks - existing or newly being attached - can't
750 * be changed to have empty cpus_allowed or mems_allowed.
753 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
754 if (!cpumask_empty(cur->cpus_allowed) &&
755 cpumask_empty(trial->cpus_allowed))
757 if (!nodes_empty(cur->mems_allowed) &&
758 nodes_empty(trial->mems_allowed))
763 * We can't shrink if we won't have enough room for SCHED_DEADLINE
767 if (is_cpu_exclusive(cur) &&
768 !cpuset_cpumask_can_shrink(cur->cpus_allowed,
769 trial->cpus_allowed))
773 * If either I or some sibling (!= me) is exclusive, we can't
777 cpuset_for_each_child(c, css, par) {
778 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
780 cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
782 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
784 nodes_intersects(trial->mems_allowed, c->mems_allowed))
796 * Helper routine for generate_sched_domains().
797 * Do cpusets a, b have overlapping effective_cpus masks?
799 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
801 return cpumask_intersects(a->effective_cpus, b->effective_cpus);
805 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
807 if (dattr->relax_domain_level < c->relax_domain_level)
808 dattr->relax_domain_level = c->relax_domain_level;
812 static void update_domain_attr_tree(struct sched_domain_attr *dattr,
813 struct cpuset *root_cs)
816 struct cgroup_subsys_state *pos_css;
819 cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
820 /* skip the whole subtree if @cp doesn't have any CPU */
821 if (cpumask_empty(cp->cpus_allowed)) {
822 pos_css = css_rightmost_descendant(pos_css);
826 if (is_sched_load_balance(cp))
827 update_domain_attr(dattr, cp);
832 /* Must be called with cpuset_rwsem held. */
833 static inline int nr_cpusets(void)
835 /* jump label reference count + the top-level cpuset */
836 return static_key_count(&cpusets_enabled_key.key) + 1;
840 * generate_sched_domains()
842 * This function builds a partial partition of the system's CPUs.
843 * A 'partial partition' is a set of non-overlapping subsets whose
844 * union is a subset of that set.
845 * The output of this function needs to be passed to kernel/sched/core.c
846 * partition_sched_domains() routine, which will rebuild the scheduler's
847 * load balancing domains (sched domains) as specified by that partial
850 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
851 * for a background explanation of this.
853 * Does not return errors, on the theory that the callers of this
854 * routine would rather not worry about failures to rebuild sched
855 * domains when operating in the severe memory shortage situations
856 * that could cause allocation failures below.
858 * Must be called with cpuset_rwsem held.
860 * The three key local variables below are:
861 * cp - cpuset pointer, used (together with pos_css) to perform a
862 * top-down scan of all cpusets. For our purposes, rebuilding
863 * the scheduler's sched domains, we can ignore !is_sched_load_
865 * csa - (for CpuSet Array) Array of pointers to all the cpusets
866 * that need to be load balanced, for convenient iterative
867 * access by the subsequent code that finds the best partition,
868 * i.e. the set of domains (subsets) of CPUs such that the
869 * cpus_allowed of every cpuset marked is_sched_load_balance
870 * is a subset of one of these domains, while there are as
871 * many such domains as possible, each as small as possible.
872 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
873 * the kernel/sched/core.c routine partition_sched_domains() in a
874 * convenient format, that can be easily compared to the prior
875 * value to determine what partition elements (sched domains)
876 * were changed (added or removed.)
878 * Finding the best partition (set of domains):
879 * The triple nested loops below over i, j, k scan over the
880 * load balanced cpusets (using the array of cpuset pointers in
881 * csa[]) looking for pairs of cpusets that have overlapping
882 * cpus_allowed, but which don't have the same 'pn' partition
883 * number, and gives them the same partition number. It keeps
884 * looping on the 'restart' label until it can no longer find
887 * The union of the cpus_allowed masks from the set of
888 * all cpusets having the same 'pn' value then form the one
889 * element of the partition (one sched domain) to be passed to
890 * partition_sched_domains().
892 static int generate_sched_domains(cpumask_var_t **domains,
893 struct sched_domain_attr **attributes)
895 struct cpuset *cp; /* top-down scan of cpusets */
896 struct cpuset **csa; /* array of all cpuset ptrs */
897 int csn; /* how many cpuset ptrs in csa so far */
898 int i, j, k; /* indices for partition finding loops */
899 cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
900 struct sched_domain_attr *dattr; /* attributes for custom domains */
901 int ndoms = 0; /* number of sched domains in result */
902 int nslot; /* next empty doms[] struct cpumask slot */
903 struct cgroup_subsys_state *pos_css;
904 bool root_load_balance = is_sched_load_balance(&top_cpuset);
910 /* Special case for the 99% of systems with one, full, sched domain */
911 if (root_load_balance && !top_cpuset.nr_subparts_cpus) {
913 doms = alloc_sched_domains(ndoms);
917 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
919 *dattr = SD_ATTR_INIT;
920 update_domain_attr_tree(dattr, &top_cpuset);
922 cpumask_and(doms[0], top_cpuset.effective_cpus,
923 housekeeping_cpumask(HK_TYPE_DOMAIN));
928 csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
934 if (root_load_balance)
935 csa[csn++] = &top_cpuset;
936 cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
937 if (cp == &top_cpuset)
940 * Continue traversing beyond @cp iff @cp has some CPUs and
941 * isn't load balancing. The former is obvious. The
942 * latter: All child cpusets contain a subset of the
943 * parent's cpus, so just skip them, and then we call
944 * update_domain_attr_tree() to calc relax_domain_level of
945 * the corresponding sched domain.
947 * If root is load-balancing, we can skip @cp if it
948 * is a subset of the root's effective_cpus.
950 if (!cpumask_empty(cp->cpus_allowed) &&
951 !(is_sched_load_balance(cp) &&
952 cpumask_intersects(cp->cpus_allowed,
953 housekeeping_cpumask(HK_TYPE_DOMAIN))))
956 if (root_load_balance &&
957 cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
960 if (is_sched_load_balance(cp) &&
961 !cpumask_empty(cp->effective_cpus))
964 /* skip @cp's subtree if not a partition root */
965 if (!is_partition_valid(cp))
966 pos_css = css_rightmost_descendant(pos_css);
970 for (i = 0; i < csn; i++)
975 /* Find the best partition (set of sched domains) */
976 for (i = 0; i < csn; i++) {
977 struct cpuset *a = csa[i];
980 for (j = 0; j < csn; j++) {
981 struct cpuset *b = csa[j];
984 if (apn != bpn && cpusets_overlap(a, b)) {
985 for (k = 0; k < csn; k++) {
986 struct cpuset *c = csa[k];
991 ndoms--; /* one less element */
998 * Now we know how many domains to create.
999 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
1001 doms = alloc_sched_domains(ndoms);
1006 * The rest of the code, including the scheduler, can deal with
1007 * the dattr==NULL case. No need to abort if alloc fails.
1009 dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
1012 for (nslot = 0, i = 0; i < csn; i++) {
1013 struct cpuset *a = csa[i];
1018 /* Skip completed partitions */
1024 if (nslot == ndoms) {
1025 static int warnings = 10;
1027 pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
1028 nslot, ndoms, csn, i, apn);
1036 *(dattr + nslot) = SD_ATTR_INIT;
1037 for (j = i; j < csn; j++) {
1038 struct cpuset *b = csa[j];
1041 cpumask_or(dp, dp, b->effective_cpus);
1042 cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
1044 update_domain_attr_tree(dattr + nslot, b);
1046 /* Done with this partition */
1052 BUG_ON(nslot != ndoms);
1058 * Fallback to the default domain if kmalloc() failed.
1059 * See comments in partition_sched_domains().
1065 *attributes = dattr;
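/*
 * Illustrative sketch (not part of the original file): the
 * partition-merging pass described in the comment above
 * generate_sched_domains(), applied to a toy array of cpumask pointers
 * instead of cpusets. pn[] is assumed to start with a distinct partition
 * number per entry; any pair of overlapping masks is folded into one
 * partition number and the scan restarts until no merges remain.
 */
static int example_merge_partitions(struct cpumask **masks, int *pn, int n)
{
	int i, j, k, ndoms = n;

restart:
	for (i = 0; i < n; i++) {
		for (j = 0; j < n; j++) {
			if (pn[i] != pn[j] &&
			    cpumask_intersects(masks[i], masks[j])) {
				for (k = 0; k < n; k++) {
					if (pn[k] == pn[j])
						pn[k] = pn[i];
				}
				ndoms--;	/* one less sched domain */
				goto restart;
			}
		}
	}
	return ndoms;
}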
1069 static void update_tasks_root_domain(struct cpuset *cs)
1071 struct css_task_iter it;
1072 struct task_struct *task;
1074 css_task_iter_start(&cs->css, 0, &it);
1076 while ((task = css_task_iter_next(&it)))
1077 dl_add_task_root_domain(task);
1079 css_task_iter_end(&it);
1082 static void rebuild_root_domains(void)
1084 struct cpuset *cs = NULL;
1085 struct cgroup_subsys_state *pos_css;
1087 percpu_rwsem_assert_held(&cpuset_rwsem);
1088 lockdep_assert_cpus_held();
1089 lockdep_assert_held(&sched_domains_mutex);
1094 * Clear default root domain DL accounting; it will be computed again
1095 * if a task belongs to it.
1097 dl_clear_root_domain(&def_root_domain);
1099 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1101 if (cpumask_empty(cs->effective_cpus)) {
1102 pos_css = css_rightmost_descendant(pos_css);
1110 update_tasks_root_domain(cs);
1119 partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1120 struct sched_domain_attr *dattr_new)
1122 mutex_lock(&sched_domains_mutex);
1123 partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
1124 rebuild_root_domains();
1125 mutex_unlock(&sched_domains_mutex);
1129 * Rebuild scheduler domains.
1131 * If the flag 'sched_load_balance' of any cpuset with non-empty
1132 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
1133 * which has that flag enabled, or if any cpuset with a non-empty
1134 * 'cpus' is removed, then call this routine to rebuild the
1135 * scheduler's dynamic sched domains.
1137 * Call with cpuset_rwsem held. Takes cpus_read_lock().
1139 static void rebuild_sched_domains_locked(void)
1141 struct cgroup_subsys_state *pos_css;
1142 struct sched_domain_attr *attr;
1143 cpumask_var_t *doms;
1147 lockdep_assert_cpus_held();
1148 percpu_rwsem_assert_held(&cpuset_rwsem);
1151 * If we have raced with CPU hotplug, return early to avoid
1152 * passing doms with offlined cpu to partition_sched_domains().
1153 * Anyways, cpuset_hotplug_workfn() will rebuild sched domains.
1155 * With no CPUs in any subpartitions, top_cpuset's effective CPUs
1156 * should be the same as the active CPUs, so checking only top_cpuset
1157 * is enough to detect racing CPU offlines.
1159 if (!top_cpuset.nr_subparts_cpus &&
1160 !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
1164 * With subpartition CPUs, however, the effective CPUs of a partition
1165 * root should be only a subset of the active CPUs. Since a CPU in any
1166 * partition root could be offlined, all must be checked.
1168 if (top_cpuset.nr_subparts_cpus) {
1170 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1171 if (!is_partition_valid(cs)) {
1172 pos_css = css_rightmost_descendant(pos_css);
1175 if (!cpumask_subset(cs->effective_cpus,
1184 /* Generate domain masks and attrs */
1185 ndoms = generate_sched_domains(&doms, &attr);
1187 /* Have scheduler rebuild the domains */
1188 partition_and_rebuild_sched_domains(ndoms, doms, attr);
1190 #else /* !CONFIG_SMP */
1191 static void rebuild_sched_domains_locked(void)
1194 #endif /* CONFIG_SMP */
1196 void rebuild_sched_domains(void)
1199 percpu_down_write(&cpuset_rwsem);
1200 rebuild_sched_domains_locked();
1201 percpu_up_write(&cpuset_rwsem);
1206 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1207 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1208 * @new_cpus: the temp variable for the new effective_cpus mask
1210 * Iterate through each task of @cs updating its cpus_allowed to the
1211 * effective cpuset's. As this function is called with cpuset_rwsem held,
1212 * cpuset membership stays stable. For top_cpuset, task_cpu_possible_mask()
1213 * is used instead of effective_cpus to make sure all offline CPUs are also
1214 * included as hotplug code won't update cpumasks for tasks in top_cpuset.
1216 static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1218 struct css_task_iter it;
1219 struct task_struct *task;
1220 bool top_cs = cs == &top_cpuset;
1222 css_task_iter_start(&cs->css, 0, &it);
1223 while ((task = css_task_iter_next(&it))) {
1224 const struct cpumask *possible_mask = task_cpu_possible_mask(task);
1228 * Percpu kthreads in top_cpuset are ignored
1230 if ((task->flags & PF_KTHREAD) && kthread_is_per_cpu(task))
1232 cpumask_andnot(new_cpus, possible_mask, cs->subparts_cpus);
1234 cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
1236 set_cpus_allowed_ptr(task, new_cpus);
1238 css_task_iter_end(&it);
1242 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1243 * @new_cpus: the temp variable for the new effective_cpus mask
1244 * @cs: the cpuset that needs to recompute the new effective_cpus mask
1245 * @parent: the parent cpuset
1247 * If the parent has subpartition CPUs, include them in the list of
1248 * allowable CPUs in computing the new effective_cpus mask. Since offlined
1249 * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask
1250 * to mask those out.
1252 static void compute_effective_cpumask(struct cpumask *new_cpus,
1253 struct cpuset *cs, struct cpuset *parent)
1255 if (parent->nr_subparts_cpus) {
1256 cpumask_or(new_cpus, parent->effective_cpus,
1257 parent->subparts_cpus);
1258 cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
1259 cpumask_and(new_cpus, new_cpus, cpu_active_mask);
1261 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1266 * Commands for update_parent_subparts_cpumask
1269 partcmd_enable, /* Enable partition root */
1270 partcmd_disable, /* Disable partition root */
1271 partcmd_update, /* Update parent's subparts_cpus */
1272 partcmd_invalidate, /* Make partition invalid */
1275 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1278 * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
1279 * @cs: The cpuset that requests change in partition root state
1280 * @cmd: Partition root state change command
1281 * @newmask: Optional new cpumask for partcmd_update
1282 * @tmp: Temporary addmask and delmask
1283 * Return: 0 or a partition root state error code
1285 * For partcmd_enable, the cpuset is being transformed from a non-partition
1286 * root to a partition root. The cpus_allowed mask of the given cpuset will
1287 * be put into parent's subparts_cpus and taken away from parent's
1288 * effective_cpus. The function will return 0 if all the CPUs listed in
1289 * cpus_allowed can be granted or an error code will be returned.
1291 * For partcmd_disable, the cpuset is being transformed from a partition
1292 * root back to a non-partition root. Any CPUs in cpus_allowed that are in
1293 * parent's subparts_cpus will be taken away from that cpumask and put back
1294 * into parent's effective_cpus. 0 will always be returned.
1296 * For partcmd_update, if the optional newmask is specified, the cpu list is
1297 * to be changed from cpus_allowed to newmask. Otherwise, cpus_allowed is
1298 * assumed to remain the same. The cpuset should either be a valid or invalid
1299 * partition root. The partition root state may change from valid to invalid
1300 * or vice versa. An error code will only be returned if transitioning from
1301 * invalid to valid violates the exclusivity rule.
1303 * For partcmd_invalidate, the current partition will be made invalid.
1305 * The partcmd_enable and partcmd_disable commands are used by
1306 * update_prstate(). An error code may be returned and the caller will check
1309 * The partcmd_update command is used by update_cpumasks_hier() with newmask
1310 * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
1311 * by update_cpumask() with NULL newmask. In both cases, the callers won't
1312 * check for error and so partition_root_state and prs_error will be updated
1315 static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
1316 struct cpumask *newmask,
1317 struct tmpmasks *tmp)
1319 struct cpuset *parent = parent_cs(cs);
1320 int adding; /* Moving cpus from effective_cpus to subparts_cpus */
1321 int deleting; /* Moving cpus from subparts_cpus to effective_cpus */
1322 int old_prs, new_prs;
1323 int part_error = PERR_NONE; /* Partition error? */
1325 percpu_rwsem_assert_held(&cpuset_rwsem);
1328 * The parent must be a partition root.
1329 * The new cpumask, if present, or the current cpus_allowed must
1332 if (!is_partition_valid(parent)) {
1333 return is_partition_invalid(parent)
1334 ? PERR_INVPARENT : PERR_NOTPART;
1336 if ((newmask && cpumask_empty(newmask)) ||
1337 (!newmask && cpumask_empty(cs->cpus_allowed)))
1338 return PERR_CPUSEMPTY;
1341 * new_prs will only be changed for the partcmd_update and
1342 * partcmd_invalidate commands.
1344 adding = deleting = false;
1345 old_prs = new_prs = cs->partition_root_state;
1346 if (cmd == partcmd_enable) {
1348 * Enabling partition root is not allowed if cpus_allowed
1349 * doesn't overlap parent's cpus_allowed.
1351 if (!cpumask_intersects(cs->cpus_allowed, parent->cpus_allowed))
1352 return PERR_INVCPUS;
1355 * A parent can be left with no CPU as long as there is no
1356 * task directly associated with the parent partition.
1358 if (cpumask_subset(parent->effective_cpus, cs->cpus_allowed) &&
1359 partition_is_populated(parent, cs))
1362 cpumask_copy(tmp->addmask, cs->cpus_allowed);
1364 } else if (cmd == partcmd_disable) {
1366 * Need to remove cpus from parent's subparts_cpus for valid
1369 deleting = !is_prs_invalid(old_prs) &&
1370 cpumask_and(tmp->delmask, cs->cpus_allowed,
1371 parent->subparts_cpus);
1372 } else if (cmd == partcmd_invalidate) {
1373 if (is_prs_invalid(old_prs))
1377 * Make the current partition invalid. It is assumed that
1378 * invalidation is caused by violating cpu exclusivity rule.
1380 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1381 parent->subparts_cpus);
1384 part_error = PERR_NOTEXCL;
1386 } else if (newmask) {
1388 * partcmd_update with newmask:
1390 * Compute add/delete mask to/from subparts_cpus
1392 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus
1393 * addmask = newmask & parent->cpus_allowed
1394 * & ~parent->subparts_cpus
1396 cpumask_andnot(tmp->delmask, cs->cpus_allowed, newmask);
1397 deleting = cpumask_and(tmp->delmask, tmp->delmask,
1398 parent->subparts_cpus);
1400 cpumask_and(tmp->addmask, newmask, parent->cpus_allowed);
1401 adding = cpumask_andnot(tmp->addmask, tmp->addmask,
1402 parent->subparts_cpus);
1404 * Make partition invalid if parent's effective_cpus could
1405 * become empty and there are tasks in the parent.
1408 cpumask_subset(parent->effective_cpus, tmp->addmask) &&
1409 !cpumask_intersects(tmp->delmask, cpu_active_mask) &&
1410 partition_is_populated(parent, cs)) {
1411 part_error = PERR_NOCPUS;
1413 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1414 parent->subparts_cpus);
1418 * partcmd_update w/o newmask:
1420 * delmask = cpus_allowed & parent->subparts_cpus
1421 * addmask = cpus_allowed & parent->cpus_allowed
1422 * & ~parent->subparts_cpus
1424 * This gets invoked either due to a hotplug event or from
1425 * update_cpumasks_hier(). This can cause the state of a
1426 * partition root to transition from valid to invalid or vice
1427 * versa. So we still need to compute the addmask and delmask.
1429 * A partition error happens when:
1430 * 1) Cpuset is valid partition, but parent does not distribute
1432 * 2) Parent has tasks and all its effective CPUs will have
1433 * to be distributed out.
1435 cpumask_and(tmp->addmask, cs->cpus_allowed,
1436 parent->cpus_allowed);
1437 adding = cpumask_andnot(tmp->addmask, tmp->addmask,
1438 parent->subparts_cpus);
1440 if ((is_partition_valid(cs) && !parent->nr_subparts_cpus) ||
1442 cpumask_subset(parent->effective_cpus, tmp->addmask) &&
1443 partition_is_populated(parent, cs))) {
1444 part_error = PERR_NOCPUS;
1448 if (part_error && is_partition_valid(cs) &&
1449 parent->nr_subparts_cpus)
1450 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1451 parent->subparts_cpus);
1454 WRITE_ONCE(cs->prs_err, part_error);
1456 if (cmd == partcmd_update) {
1458 * Check for possible transition between valid and invalid
1461 switch (cs->partition_root_state) {
1467 case PRS_INVALID_ROOT:
1468 case PRS_INVALID_ISOLATED:
1475 if (!adding && !deleting && (new_prs == old_prs))
1479 * Transitioning from invalid to valid or vice versa may require
1480 * changing CS_CPU_EXCLUSIVE and CS_SCHED_LOAD_BALANCE.
1482 if (old_prs != new_prs) {
1483 if (is_prs_invalid(old_prs) && !is_cpu_exclusive(cs) &&
1484 (update_flag(CS_CPU_EXCLUSIVE, cs, 1) < 0))
1485 return PERR_NOTEXCL;
1486 if (is_prs_invalid(new_prs) && is_cpu_exclusive(cs))
1487 update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1491 * Change the parent's subparts_cpus.
1492 * Newly added CPUs will be removed from effective_cpus and
1493 * newly deleted ones will be added back to effective_cpus.
1495 spin_lock_irq(&callback_lock);
1497 cpumask_or(parent->subparts_cpus,
1498 parent->subparts_cpus, tmp->addmask);
1499 cpumask_andnot(parent->effective_cpus,
1500 parent->effective_cpus, tmp->addmask);
1503 cpumask_andnot(parent->subparts_cpus,
1504 parent->subparts_cpus, tmp->delmask);
1506 * Some of the CPUs in subparts_cpus might have been offlined.
1508 cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask);
1509 cpumask_or(parent->effective_cpus,
1510 parent->effective_cpus, tmp->delmask);
1513 parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
1515 if (old_prs != new_prs)
1516 cs->partition_root_state = new_prs;
1518 spin_unlock_irq(&callback_lock);
1520 if (adding || deleting)
1521 update_tasks_cpumask(parent, tmp->addmask);
1524 * Set or clear CS_SCHED_LOAD_BALANCE when partcmd_update, if necessary.
1525 * rebuild_sched_domains_locked() may be called.
1527 if (old_prs != new_prs) {
1528 if (old_prs == PRS_ISOLATED)
1529 update_flag(CS_SCHED_LOAD_BALANCE, cs, 1);
1530 else if (new_prs == PRS_ISOLATED)
1531 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
1533 notify_partition_change(cs, old_prs);
1538 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
1539 * @cs: the cpuset to consider
1540 * @tmp: temp variables for calculating effective_cpus & partition setup
1541 * @force: don't skip any descendant cpusets if set
1543 * When configured cpumask is changed, the effective cpumasks of this cpuset
1544 * and all its descendants need to be updated.
1546 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
1548 * Called with cpuset_rwsem held
1550 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
1554 struct cgroup_subsys_state *pos_css;
1555 bool need_rebuild_sched_domains = false;
1556 int old_prs, new_prs;
1559 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1560 struct cpuset *parent = parent_cs(cp);
1561 bool update_parent = false;
1563 compute_effective_cpumask(tmp->new_cpus, cp, parent);
1566 * If it becomes empty, inherit the effective mask of the
1567 * parent, which is guaranteed to have some CPUs unless
1568 * it is a partition root that has explicitly distributed
1571 if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) {
1572 if (is_partition_valid(cp) &&
1573 cpumask_equal(cp->cpus_allowed, cp->subparts_cpus))
1574 goto update_parent_subparts;
1576 cpumask_copy(tmp->new_cpus, parent->effective_cpus);
1577 if (!cp->use_parent_ecpus) {
1578 cp->use_parent_ecpus = true;
1579 parent->child_ecpus_count++;
1581 } else if (cp->use_parent_ecpus) {
1582 cp->use_parent_ecpus = false;
1583 WARN_ON_ONCE(!parent->child_ecpus_count);
1584 parent->child_ecpus_count--;
1588 * Skip the whole subtree if the cpumask remains the same,
1589 * there is no partition root state, and the force flag is not set.
1591 if (!cp->partition_root_state && !force &&
1592 cpumask_equal(tmp->new_cpus, cp->effective_cpus)) {
1593 pos_css = css_rightmost_descendant(pos_css);
1597 update_parent_subparts:
1599 * update_parent_subparts_cpumask() should have been called
1600 * for cs already in update_cpumask(). We should also call
1601 * update_tasks_cpumask() again for tasks in the parent
1602 * cpuset if the parent's subparts_cpus changes.
1604 old_prs = new_prs = cp->partition_root_state;
1605 if ((cp != cs) && old_prs) {
1606 switch (parent->partition_root_state) {
1609 update_parent = true;
1614 * When parent is not a partition root or is
1615 * invalid, child partition roots become
1618 if (is_partition_valid(cp))
1619 new_prs = -cp->partition_root_state;
1620 WRITE_ONCE(cp->prs_err,
1621 is_partition_invalid(parent)
1622 ? PERR_INVPARENT : PERR_NOTPART);
1627 if (!css_tryget_online(&cp->css))
1631 if (update_parent) {
1632 update_parent_subparts_cpumask(cp, partcmd_update, NULL,
1635 * The cpuset partition_root_state may become
1636 * invalid. Capture it.
1638 new_prs = cp->partition_root_state;
1641 spin_lock_irq(&callback_lock);
1643 if (cp->nr_subparts_cpus && !is_partition_valid(cp)) {
1645 * Put all active subparts_cpus back to effective_cpus.
1647 cpumask_or(tmp->new_cpus, tmp->new_cpus,
1649 cpumask_and(tmp->new_cpus, tmp->new_cpus,
1651 cp->nr_subparts_cpus = 0;
1652 cpumask_clear(cp->subparts_cpus);
1655 cpumask_copy(cp->effective_cpus, tmp->new_cpus);
1656 if (cp->nr_subparts_cpus) {
1658 * Make sure that effective_cpus & subparts_cpus
1659 * are mutually exclusive.
1661 cpumask_andnot(cp->effective_cpus, cp->effective_cpus,
1665 cp->partition_root_state = new_prs;
1666 spin_unlock_irq(&callback_lock);
1668 notify_partition_change(cp, old_prs);
1670 WARN_ON(!is_in_v2_mode() &&
1671 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
1673 update_tasks_cpumask(cp, tmp->new_cpus);
1676 * On legacy hierarchy, if the effective cpumask of any non-
1677 * empty cpuset is changed, we need to rebuild sched domains.
1678 * On default hierarchy, the cpuset needs to be a partition
1681 if (!cpumask_empty(cp->cpus_allowed) &&
1682 is_sched_load_balance(cp) &&
1683 (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
1684 is_partition_valid(cp)))
1685 need_rebuild_sched_domains = true;
1692 if (need_rebuild_sched_domains)
1693 rebuild_sched_domains_locked();
1697 * update_sibling_cpumasks - Update siblings' cpumasks
1698 * @parent: Parent cpuset
1699 * @cs: Current cpuset
1700 * @tmp: Temp variables
1702 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1703 struct tmpmasks *tmp)
1705 struct cpuset *sibling;
1706 struct cgroup_subsys_state *pos_css;
1708 percpu_rwsem_assert_held(&cpuset_rwsem);
1711 * Check all its siblings and call update_cpumasks_hier()
1712 * if their use_parent_ecpus flag is set in order for them
1713 * to use the right effective_cpus value.
1715 * The update_cpumasks_hier() function may sleep. So we have to
1716 * release the RCU read lock before calling it.
1719 cpuset_for_each_child(sibling, pos_css, parent) {
1722 if (!sibling->use_parent_ecpus)
1724 if (!css_tryget_online(&sibling->css))
1728 update_cpumasks_hier(sibling, tmp, false);
1730 css_put(&sibling->css);
1736 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
1737 * @cs: the cpuset to consider
1738 * @trialcs: trial cpuset
1739 * @buf: buffer of cpu numbers written to this cpuset
1741 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
1745 struct tmpmasks tmp;
1746 bool invalidate = false;
1748 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
1749 if (cs == &top_cpuset)
1753 * An empty cpus_allowed is ok only if the cpuset has no tasks.
1754 * Since cpulist_parse() fails on an empty mask, we special case
1755 * that parsing. The validate_change() call ensures that cpusets
1756 * with tasks have cpus.
1759 cpumask_clear(trialcs->cpus_allowed);
1761 retval = cpulist_parse(buf, trialcs->cpus_allowed);
1765 if (!cpumask_subset(trialcs->cpus_allowed,
1766 top_cpuset.cpus_allowed))
1770 /* Nothing to do if the cpus didn't change */
1771 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
1774 #ifdef CONFIG_CPUMASK_OFFSTACK
1776 * Use the cpumasks in trialcs for tmpmasks when they are pointers
1777 * to allocated cpumasks.
1779 * Note that update_parent_subparts_cpumask() uses only addmask &
1780 * delmask, but not new_cpus.
1782 tmp.addmask = trialcs->subparts_cpus;
1783 tmp.delmask = trialcs->effective_cpus;
1784 tmp.new_cpus = NULL;
1787 retval = validate_change(cs, trialcs);
1789 if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
1790 struct cpuset *cp, *parent;
1791 struct cgroup_subsys_state *css;
1794 * The -EINVAL error code indicates that partition sibling
1795 * CPU exclusivity rule has been violated. We still allow
1796 * the cpumask change to proceed while invalidating the
1797 * partition. However, any conflicting sibling partitions
1798 * have to be marked as invalid too.
1802 parent = parent_cs(cs);
1803 cpuset_for_each_child(cp, css, parent)
1804 if (is_partition_valid(cp) &&
1805 cpumask_intersects(trialcs->cpus_allowed, cp->cpus_allowed)) {
1807 update_parent_subparts_cpumask(cp, partcmd_invalidate, NULL, &tmp);
1816 if (cs->partition_root_state) {
1818 update_parent_subparts_cpumask(cs, partcmd_invalidate,
1821 update_parent_subparts_cpumask(cs, partcmd_update,
1822 trialcs->cpus_allowed, &tmp);
1825 compute_effective_cpumask(trialcs->effective_cpus, trialcs,
1827 spin_lock_irq(&callback_lock);
1828 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
1831 * Make sure that subparts_cpus, if not empty, is a subset of
1832 * cpus_allowed. Clear subparts_cpus if the partition is not valid
1833 * or if the effective cpus would become empty while tasks are present.
1835 if (cs->nr_subparts_cpus) {
1836 if (!is_partition_valid(cs) ||
1837 (cpumask_subset(trialcs->effective_cpus, cs->subparts_cpus) &&
1838 partition_is_populated(cs, NULL))) {
1839 cs->nr_subparts_cpus = 0;
1840 cpumask_clear(cs->subparts_cpus);
1842 cpumask_and(cs->subparts_cpus, cs->subparts_cpus,
1844 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
1847 spin_unlock_irq(&callback_lock);
1849 #ifdef CONFIG_CPUMASK_OFFSTACK
1850 /* Now trialcs->cpus_allowed is available */
1851 tmp.new_cpus = trialcs->cpus_allowed;
1854 /* effective_cpus will be updated here */
1855 update_cpumasks_hier(cs, &tmp, false);
1857 if (cs->partition_root_state) {
1858 struct cpuset *parent = parent_cs(cs);
1861 * For partition root, update the cpumasks of sibling
1862 * cpusets if they use parent's effective_cpus.
1864 if (parent->child_ecpus_count)
1865 update_sibling_cpumasks(parent, cs, &tmp);
1871 * Migrate memory region from one set of nodes to another. This is
1872 * performed asynchronously as it can be called from process migration path
1873 * holding locks involved in process management. All mm migrations are
1874 * performed in the queued order and can be waited for by flushing
1875 * cpuset_migrate_mm_wq.
1878 struct cpuset_migrate_mm_work {
1879 struct work_struct work;
1880 struct mm_struct *mm;
1885 static void cpuset_migrate_mm_workfn(struct work_struct *work)
1887 struct cpuset_migrate_mm_work *mwork =
1888 container_of(work, struct cpuset_migrate_mm_work, work);
1890 /* on a wq worker, no need to worry about %current's mems_allowed */
1891 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
1896 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
1897 const nodemask_t *to)
1899 struct cpuset_migrate_mm_work *mwork;
1901 if (nodes_equal(*from, *to)) {
1906 mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
1909 mwork->from = *from;
1911 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
1912 queue_work(cpuset_migrate_mm_wq, &mwork->work);
1918 static void cpuset_post_attach(void)
1920 flush_workqueue(cpuset_migrate_mm_wq);
1924 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
1925 * @tsk: the task to change
1926 * @newmems: new nodes that the task will be allowed to use
1928 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
1929 * and rebind the task's mempolicy, if any. If the task is allocating in
1930 * parallel, it might temporarily see an empty intersection, which results in
1931 * a seqlock check and retry before OOM or allocation failure.
1933 static void cpuset_change_task_nodemask(struct task_struct *tsk,
1934 nodemask_t *newmems)
1938 local_irq_disable();
1939 write_seqcount_begin(&tsk->mems_allowed_seq);
1941 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
1942 mpol_rebind_task(tsk, newmems);
1943 tsk->mems_allowed = *newmems;
1945 write_seqcount_end(&tsk->mems_allowed_seq);
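/*
 * Illustrative sketch (not part of the original file): the reader side of
 * the mems_allowed_seq scheme used above. A reader retries its snapshot
 * if it raced with the writer, so it never acts on a torn nodemask. The
 * kernel's real helpers for this are read_mems_allowed_begin() and
 * read_mems_allowed_retry() in <linux/sched/mm.h>.
 */
static nodemask_t example_read_mems_allowed(struct task_struct *tsk)
{
	nodemask_t nodes;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&tsk->mems_allowed_seq);
		nodes = tsk->mems_allowed;
	} while (read_seqcount_retry(&tsk->mems_allowed_seq, seq));

	return nodes;
}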
1951 static void *cpuset_being_rebound;
1954 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1955 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1957 * Iterate through each task of @cs updating its mems_allowed to the
1958 * effective cpuset's. As this function is called with cpuset_rwsem held,
1959 * cpuset membership stays stable.
1961 static void update_tasks_nodemask(struct cpuset *cs)
1963 static nodemask_t newmems; /* protected by cpuset_rwsem */
1964 struct css_task_iter it;
1965 struct task_struct *task;
1967 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
1969 guarantee_online_mems(cs, &newmems);
1972 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
1973 * take while holding tasklist_lock. Forks can happen - the
1974 * mpol_dup() cpuset_being_rebound check will catch such forks,
1975 * and rebind their vma mempolicies too. Because we still hold
1976 * the global cpuset_rwsem, we know that no other rebind effort
1977 * will be contending for the global variable cpuset_being_rebound.
1978 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1979 * is idempotent. Also migrate pages in each mm to new nodes.
1981 css_task_iter_start(&cs->css, 0, &it);
1982 while ((task = css_task_iter_next(&it))) {
1983 struct mm_struct *mm;
1986 cpuset_change_task_nodemask(task, &newmems);
1988 mm = get_task_mm(task);
1992 migrate = is_memory_migrate(cs);
1994 mpol_rebind_mm(mm, &cs->mems_allowed);
1996 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2000 css_task_iter_end(&it);
2003 * All the tasks' nodemasks have been updated, update
2004 * cs->old_mems_allowed.
2006 cs->old_mems_allowed = newmems;
2008 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
2009 cpuset_being_rebound = NULL;
2013 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2014 * @cs: the cpuset to consider
2015 * @new_mems: a temp variable for calculating new effective_mems
2017 * When configured nodemask is changed, the effective nodemasks of this cpuset
2018 * and all its descendants need to be updated.
2020 * On legacy hierarchy, effective_mems will be the same as mems_allowed.
2022 * Called with cpuset_rwsem held
2024 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2027 struct cgroup_subsys_state *pos_css;
2030 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2031 struct cpuset *parent = parent_cs(cp);
2033 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2036 * If it becomes empty, inherit the effective mask of the
2037 * parent, which is guaranteed to have some MEMs.
2039 if (is_in_v2_mode() && nodes_empty(*new_mems))
2040 *new_mems = parent->effective_mems;
2042 /* Skip the whole subtree if the nodemask remains the same. */
2043 if (nodes_equal(*new_mems, cp->effective_mems)) {
2044 pos_css = css_rightmost_descendant(pos_css);
2048 if (!css_tryget_online(&cp->css))
2052 spin_lock_irq(&callback_lock);
2053 cp->effective_mems = *new_mems;
2054 spin_unlock_irq(&callback_lock);
2056 WARN_ON(!is_in_v2_mode() &&
2057 !nodes_equal(cp->mems_allowed, cp->effective_mems));
2059 update_tasks_nodemask(cp);
2068 * Handle a user request to change the 'mems' memory placement
2069 * of a cpuset. Needs to validate the request, update the
2070 * cpuset's mems_allowed, and, for each task in the cpuset,
2071 * update mems_allowed, rebind the task's mempolicy and any vma
2072 * mempolicies, and, if the cpuset is marked 'memory_migrate',
2073 * migrate the task's pages to the new memory.
2075 * Call with cpuset_rwsem held. May take callback_lock during call.
2076 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2077 * lock each such task's mm->mmap_lock, scan its vma's and rebind
2078 * their mempolicies to the cpuset's new mems_allowed.
2080 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2086 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
2089 if (cs == &top_cpuset) {
2095 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2096 * Since nodelist_parse() fails on an empty mask, we special case
2097 * that parsing. The validate_change() call ensures that cpusets
2098 * with tasks have memory.
2101 nodes_clear(trialcs->mems_allowed);
2103 retval = nodelist_parse(buf, trialcs->mems_allowed);
2107 if (!nodes_subset(trialcs->mems_allowed,
2108 top_cpuset.mems_allowed)) {
2114 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
2115 retval = 0; /* Too easy - nothing to do */
2118 retval = validate_change(cs, trialcs);
2122 check_insane_mems_config(&trialcs->mems_allowed);
2124 spin_lock_irq(&callback_lock);
2125 cs->mems_allowed = trialcs->mems_allowed;
2126 spin_unlock_irq(&callback_lock);
2128 /* use trialcs->mems_allowed as a temp variable */
2129 update_nodemasks_hier(cs, &trialcs->mems_allowed);
2134 bool current_cpuset_is_being_rebound(void)
2139 ret = task_cs(current) == cpuset_being_rebound;
2145 static int update_relax_domain_level(struct cpuset *cs, s64 val)
2148 if (val < -1 || val >= sched_domain_level_max)
2152 if (val != cs->relax_domain_level) {
2153 cs->relax_domain_level = val;
2154 if (!cpumask_empty(cs->cpus_allowed) &&
2155 is_sched_load_balance(cs))
2156 rebuild_sched_domains_locked();
2163 * update_tasks_flags - update the spread flags of tasks in the cpuset.
2164 * @cs: the cpuset in which each task's spread flags needs to be changed
2166 * Iterate through each task of @cs updating its spread flags. As this
2167 * function is called with cpuset_rwsem held, cpuset membership stays stable.
2170 static void update_tasks_flags(struct cpuset *cs)
2172 struct css_task_iter it;
2173 struct task_struct *task;
2175 css_task_iter_start(&cs->css, 0, &it);
2176 while ((task = css_task_iter_next(&it)))
2177 cpuset_update_task_spread_flags(cs, task);
2178 css_task_iter_end(&it);
2182 * update_flag - read a 0 or a 1 in a file and update associated flag
2183 * bit: the bit to update (see cpuset_flagbits_t)
2184 * cs: the cpuset to update
2185 * turning_on: whether the flag is being set or cleared
2187 * Call with cpuset_rwsem held.
2190 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2193 struct cpuset *trialcs;
2194 int balance_flag_changed;
2195 int spread_flag_changed;
2198 trialcs = alloc_trial_cpuset(cs);
2203 set_bit(bit, &trialcs->flags);
2205 clear_bit(bit, &trialcs->flags);
2207 err = validate_change(cs, trialcs);
2211 balance_flag_changed = (is_sched_load_balance(cs) !=
2212 is_sched_load_balance(trialcs));
2214 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
2215 || (is_spread_page(cs) != is_spread_page(trialcs)));
2217 spin_lock_irq(&callback_lock);
2218 cs->flags = trialcs->flags;
2219 spin_unlock_irq(&callback_lock);
2221 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
2222 rebuild_sched_domains_locked();
2224 if (spread_flag_changed)
2225 update_tasks_flags(cs);
2227 free_cpuset(trialcs);
2232 * update_prstate - update partition_root_state
2233 * @cs: the cpuset to update
2234 * @new_prs: new partition root state
2235 * Return: 0 if successful, != 0 if error
2237 * Call with cpuset_rwsem held.
2239 static int update_prstate(struct cpuset *cs, int new_prs)
2241 int err = PERR_NONE, old_prs = cs->partition_root_state;
2242 bool sched_domain_rebuilt = false;
2243 struct cpuset *parent = parent_cs(cs);
2244 struct tmpmasks tmpmask;
2246 if (old_prs == new_prs)
2250 * For a previously invalid partition root, leave it
2251 * invalid if new_prs is not "member".
2253 if (new_prs && is_prs_invalid(old_prs)) {
2254 cs->partition_root_state = -new_prs;
2258 if (alloc_cpumasks(NULL, &tmpmask))
2263 * Turning on partition root requires setting the
2264 * CS_CPU_EXCLUSIVE bit implicitly as well, and cpus_allowed cannot be empty.
2267 if (cpumask_empty(cs->cpus_allowed)) {
2268 err = PERR_CPUSEMPTY;
2272 err = update_flag(CS_CPU_EXCLUSIVE, cs, 1);
2278 err = update_parent_subparts_cpumask(cs, partcmd_enable,
2281 update_flag(CS_CPU_EXCLUSIVE, cs, 0);
2285 if (new_prs == PRS_ISOLATED) {
2287 * Disabling the load balance flag should not return an
2288 * error unless the system is running out of memory.
2290 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
2291 sched_domain_rebuilt = true;
2293 } else if (old_prs && new_prs) {
2295 * A change in load balance state only, no change in cpumasks.
2297 update_flag(CS_SCHED_LOAD_BALANCE, cs, (new_prs != PRS_ISOLATED));
2298 sched_domain_rebuilt = true;
2299 goto out; /* Sched domain is rebuilt in update_flag() */
2302 * Switching back to member is always allowed even if it
2303 * disables child partitions.
2305 update_parent_subparts_cpumask(cs, partcmd_disable, NULL,
2309 * If there are child partitions, they will all become invalid.
2311 if (unlikely(cs->nr_subparts_cpus)) {
2312 spin_lock_irq(&callback_lock);
2313 cs->nr_subparts_cpus = 0;
2314 cpumask_clear(cs->subparts_cpus);
2315 compute_effective_cpumask(cs->effective_cpus, cs, parent);
2316 spin_unlock_irq(&callback_lock);
2319 /* Turning off CS_CPU_EXCLUSIVE will not return an error */
2320 update_flag(CS_CPU_EXCLUSIVE, cs, 0);
2322 if (!is_sched_load_balance(cs)) {
2323 /* Make sure load balance is on */
2324 update_flag(CS_SCHED_LOAD_BALANCE, cs, 1);
2325 sched_domain_rebuilt = true;
2329 update_tasks_cpumask(parent, tmpmask.new_cpus);
2331 if (parent->child_ecpus_count)
2332 update_sibling_cpumasks(parent, cs, &tmpmask);
2334 if (!sched_domain_rebuilt)
2335 rebuild_sched_domains_locked();
2338 * Make the partition invalid if an error happened
2342 spin_lock_irq(&callback_lock);
2343 cs->partition_root_state = new_prs;
2344 WRITE_ONCE(cs->prs_err, err);
2345 spin_unlock_irq(&callback_lock);
2347 * Update child cpusets, if present.
2348 * Force update if switching back to member.
2350 if (!list_empty(&cs->css.children))
2351 update_cpumasks_hier(cs, &tmpmask, !new_prs);
2353 notify_partition_change(cs, old_prs);
2354 free_cpumasks(NULL, &tmpmask);
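/*
 * Editorial summary of the transitions handled in update_prstate() above
 * (illustrative only, not part of the upstream source):
 *
 *	member -> root/isolated : requires a non-empty cpus_allowed and
 *				  implicitly sets CS_CPU_EXCLUSIVE
 *	root  <-> isolated      : only the load balance flag is toggled
 *	root/isolated -> member : always allowed; child partitions become
 *				  invalid and CS_CPU_EXCLUSIVE is cleared
 *
 * A failure while enabling does not keep the old state; the cpuset becomes
 * an invalid partition (negative partition_root_state) and prs_err records
 * the reason later reported by sched_partition_show().
 */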
2359 * Frequency meter - How fast is some event occurring?
2361 * These routines manage a digitally filtered, constant time based,
2362 * event frequency meter. There are four routines:
2363 * fmeter_init() - initialize a frequency meter.
2364 * fmeter_markevent() - called each time the event happens.
2365 * fmeter_getrate() - returns the recent rate of such events.
2366 * fmeter_update() - internal routine used to update fmeter.
2368 * A common data structure is passed to each of these routines,
2369 * which is used to keep track of the state required to manage the
2370 * frequency meter and its digital filter.
2372 * The filter works on the number of events marked per unit time.
2373 * The filter is single-pole low-pass recursive (IIR). The time unit
2374 * is 1 second. Arithmetic is done using 32-bit integers scaled to
2375 * simulate 3 decimal digits of precision (multiplied by 1000).
2377 * With an FM_COEF of 933, and a time base of 1 second, the filter
2378 * has a half-life of 10 seconds, meaning that if the events quit
2379 * happening, then the rate returned from fmeter_getrate()
2380 * will be cut in half every 10 seconds, until it converges to zero.
2382 * It is not worth doing a real infinitely recursive filter. If more
2383 * than FM_MAXTICKS ticks have elapsed since the last filter event,
2384 * just compute FM_MAXTICKS ticks' worth, by which point the level will have decayed to a negligible value anyway.
2387 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
2388 * arithmetic overflow in the fmeter_update() routine.
2390 * Given the simple 32 bit integer arithmetic used, this meter works
2391 * best for reporting rates between one per millisecond (msec) and
2392 * one per 32 (approx) seconds. At constant rates faster than one
2393 * per msec it maxes out at values just under 1,000,000. At constant
2394 * rates between one per msec, and one per second it will stabilize
2395 * to a value N*1000, where N is the rate of events per second.
2396 * At constant rates between one per second and one per 32 seconds,
2397 * it will be choppy, moving up on the seconds that have an event,
2398 * and then decaying until the next event. At rates slower than
2399 * about one in 32 seconds, it decays all the way back to zero between events.
2403 #define FM_COEF 933 /* coefficient for half-life of 10 secs */
2404 #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */
2405 #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */
2406 #define FM_SCALE 1000 /* faux fixed point scale */
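/*
 * Editorial illustration (not part of the upstream source): per elapsed
 * second the filter below computes
 *
 *	val = (FM_COEF * val) / FM_SCALE + ((FM_SCALE - FM_COEF) * cnt) / FM_SCALE
 *
 * With FM_COEF = 933, each idle second multiplies val by 0.933, and
 * 0.933^10 ~= 0.50, which is the 10 second half-life quoted above.
 * Feeding a steady cnt of N * FM_SCALE per second has the fixed point
 * val = N * FM_SCALE, i.e. the N*1000 steady-state value described above.
 */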
2408 /* Initialize a frequency meter */
2409 static void fmeter_init(struct fmeter *fmp)
2414 spin_lock_init(&fmp->lock);
2417 /* Internal meter update - process cnt events and update value */
2418 static void fmeter_update(struct fmeter *fmp)
2423 now = ktime_get_seconds();
2424 ticks = now - fmp->time;
2429 ticks = min(FM_MAXTICKS, ticks);
2431 fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
2434 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
2438 /* Process any previous ticks, then bump cnt by one (times scale). */
2439 static void fmeter_markevent(struct fmeter *fmp)
2441 spin_lock(&fmp->lock);
2443 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
2444 spin_unlock(&fmp->lock);
2447 /* Process any previous ticks, then return current value. */
2448 static int fmeter_getrate(struct fmeter *fmp)
2452 spin_lock(&fmp->lock);
2455 spin_unlock(&fmp->lock);
2459 static struct cpuset *cpuset_attach_old_cs;
2462 * Check to see if a cpuset can accept a new task
2463 * For v1, cpus_allowed and mems_allowed can't be empty.
2464 * For v2, effective_cpus can't be empty.
2465 * Note that in v1, effective_cpus = cpus_allowed.
2467 static int cpuset_can_attach_check(struct cpuset *cs)
2469 if (cpumask_empty(cs->effective_cpus) ||
2470 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
2475 /* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */
2476 static int cpuset_can_attach(struct cgroup_taskset *tset)
2478 struct cgroup_subsys_state *css;
2480 struct task_struct *task;
2483 /* used later by cpuset_attach() */
2484 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2487 percpu_down_write(&cpuset_rwsem);
2489 /* Check to see if task is allowed in the cpuset */
2490 ret = cpuset_can_attach_check(cs);
2494 cgroup_taskset_for_each(task, css, tset) {
2495 ret = task_can_attach(task, cs->effective_cpus);
2498 ret = security_task_setscheduler(task);
2504 * Mark attach is in progress. This makes validate_change() fail
2505 * changes which zero cpus/mems_allowed.
2507 cs->attach_in_progress++;
2509 percpu_up_write(&cpuset_rwsem);
2513 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
2515 struct cgroup_subsys_state *css;
2518 cgroup_taskset_first(tset, &css);
2521 percpu_down_write(&cpuset_rwsem);
2522 cs->attach_in_progress--;
2523 if (!cs->attach_in_progress)
2524 wake_up(&cpuset_attach_wq);
2525 percpu_up_write(&cpuset_rwsem);
2529 * Protected by cpuset_rwsem. cpus_attach is used only by cpuset_attach_task()
2530 * but we can't allocate it dynamically there. Define it globally and
2531 * allocate it in cpuset_init().
2533 static cpumask_var_t cpus_attach;
2534 static nodemask_t cpuset_attach_nodemask_to;
2536 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
2538 percpu_rwsem_assert_held(&cpuset_rwsem);
2540 if (cs != &top_cpuset)
2541 guarantee_online_cpus(task, cpus_attach);
2543 cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
2546 * can_attach beforehand should guarantee that this doesn't
2547 * fail. TODO: have a better way to handle failure here
2549 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
2551 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
2552 cpuset_update_task_spread_flags(cs, task);
2555 static void cpuset_attach(struct cgroup_taskset *tset)
2557 struct task_struct *task;
2558 struct task_struct *leader;
2559 struct cgroup_subsys_state *css;
2561 struct cpuset *oldcs = cpuset_attach_old_cs;
2562 bool cpus_updated, mems_updated;
2564 cgroup_taskset_first(tset, &css);
2567 lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
2568 percpu_down_write(&cpuset_rwsem);
2569 cpus_updated = !cpumask_equal(cs->effective_cpus,
2570 oldcs->effective_cpus);
2571 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
2574 * In the default hierarchy, enabling cpuset in the child cgroups
2575 * will trigger a number of cpuset_attach() calls with no change
2576 * in effective cpus and mems. In that case, we can optimize
2577 * by skipping the task iteration and update.
2579 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
2580 !cpus_updated && !mems_updated) {
2581 cpuset_attach_nodemask_to = cs->effective_mems;
2585 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
2587 cgroup_taskset_for_each(task, css, tset)
2588 cpuset_attach_task(cs, task);
2591 * Change mm for all threadgroup leaders. This is expensive and may
2592 * sleep and should be moved outside the migration path proper. Skip it
2593 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is not set.
2596 cpuset_attach_nodemask_to = cs->effective_mems;
2597 if (!is_memory_migrate(cs) && !mems_updated)
2600 cgroup_taskset_for_each_leader(leader, css, tset) {
2601 struct mm_struct *mm = get_task_mm(leader);
2604 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
2607 * old_mems_allowed is the same as mems_allowed
2608 * here, except if this task is being moved
2609 * automatically due to hotplug. In that case
2610 * @mems_allowed has been updated and is empty, so
2611 * @old_mems_allowed is the right nodemask to migrate the mm from.
2614 if (is_memory_migrate(cs))
2615 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
2616 &cpuset_attach_nodemask_to);
2623 cs->old_mems_allowed = cpuset_attach_nodemask_to;
2625 cs->attach_in_progress--;
2626 if (!cs->attach_in_progress)
2627 wake_up(&cpuset_attach_wq);
2629 percpu_up_write(&cpuset_rwsem);
2632 /* The various types of files and directories in a cpuset file system */
2635 FILE_MEMORY_MIGRATE,
2638 FILE_EFFECTIVE_CPULIST,
2639 FILE_EFFECTIVE_MEMLIST,
2640 FILE_SUBPARTS_CPULIST,
2644 FILE_SCHED_LOAD_BALANCE,
2645 FILE_PARTITION_ROOT,
2646 FILE_SCHED_RELAX_DOMAIN_LEVEL,
2647 FILE_MEMORY_PRESSURE_ENABLED,
2648 FILE_MEMORY_PRESSURE,
2651 } cpuset_filetype_t;
2653 static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
2656 struct cpuset *cs = css_cs(css);
2657 cpuset_filetype_t type = cft->private;
2661 percpu_down_write(&cpuset_rwsem);
2662 if (!is_cpuset_online(cs)) {
2668 case FILE_CPU_EXCLUSIVE:
2669 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
2671 case FILE_MEM_EXCLUSIVE:
2672 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
2674 case FILE_MEM_HARDWALL:
2675 retval = update_flag(CS_MEM_HARDWALL, cs, val);
2677 case FILE_SCHED_LOAD_BALANCE:
2678 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
2680 case FILE_MEMORY_MIGRATE:
2681 retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
2683 case FILE_MEMORY_PRESSURE_ENABLED:
2684 cpuset_memory_pressure_enabled = !!val;
2686 case FILE_SPREAD_PAGE:
2687 retval = update_flag(CS_SPREAD_PAGE, cs, val);
2689 case FILE_SPREAD_SLAB:
2690 retval = update_flag(CS_SPREAD_SLAB, cs, val);
2697 percpu_up_write(&cpuset_rwsem);
2702 static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
2705 struct cpuset *cs = css_cs(css);
2706 cpuset_filetype_t type = cft->private;
2707 int retval = -ENODEV;
2710 percpu_down_write(&cpuset_rwsem);
2711 if (!is_cpuset_online(cs))
2715 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2716 retval = update_relax_domain_level(cs, val);
2723 percpu_up_write(&cpuset_rwsem);
2729 * Common handling for a write to a "cpus" or "mems" file.
2731 static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
2732 char *buf, size_t nbytes, loff_t off)
2734 struct cpuset *cs = css_cs(of_css(of));
2735 struct cpuset *trialcs;
2736 int retval = -ENODEV;
2738 buf = strstrip(buf);
2741 * CPU or memory hotunplug may leave @cs w/o any execution
2742 * resources, in which case the hotplug code asynchronously updates
2743 * configuration and transfers all tasks to the nearest ancestor
2744 * which can execute.
2746 * As writes to "cpus" or "mems" may restore @cs's execution
2747 * resources, wait for the previously scheduled operations before
2748 * proceeding, so that we don't keep removing tasks added
2749 * after execution capability is restored.
2751 * cpuset_hotplug_work calls back into cgroup core via
2752 * cgroup_transfer_tasks() and waiting for it from a cgroupfs
2753 * operation like this one can lead to a deadlock through kernfs
2754 * active_ref protection. Let's break the protection. Losing the
2755 * protection is okay as we check whether @cs is online after
2756 * grabbing cpuset_rwsem anyway. This only happens on the legacy hierarchy.
2760 kernfs_break_active_protection(of->kn);
2761 flush_work(&cpuset_hotplug_work);
2764 percpu_down_write(&cpuset_rwsem);
2765 if (!is_cpuset_online(cs))
2768 trialcs = alloc_trial_cpuset(cs);
2774 switch (of_cft(of)->private) {
2776 retval = update_cpumask(cs, trialcs, buf);
2779 retval = update_nodemask(cs, trialcs, buf);
2786 free_cpuset(trialcs);
2788 percpu_up_write(&cpuset_rwsem);
2790 kernfs_unbreak_active_protection(of->kn);
2792 flush_workqueue(cpuset_migrate_mm_wq);
2793 return retval ?: nbytes;
2797 * These ascii lists should be read in a single call, by using a user
2798 * buffer large enough to hold the entire map. If read in smaller
2799 * chunks, there is no guarantee of atomicity. Since the display format
2800 * used (a list of ranges of sequential numbers) is variable length,
2801 * and since these maps can change value dynamically, one could read
2802 * gibberish by doing partial reads while a list was changing.
2804 static int cpuset_common_seq_show(struct seq_file *sf, void *v)
2806 struct cpuset *cs = css_cs(seq_css(sf));
2807 cpuset_filetype_t type = seq_cft(sf)->private;
2810 spin_lock_irq(&callback_lock);
2814 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
2817 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
2819 case FILE_EFFECTIVE_CPULIST:
2820 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
2822 case FILE_EFFECTIVE_MEMLIST:
2823 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
2825 case FILE_SUBPARTS_CPULIST:
2826 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus));
2832 spin_unlock_irq(&callback_lock);
2836 static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
2838 struct cpuset *cs = css_cs(css);
2839 cpuset_filetype_t type = cft->private;
2841 case FILE_CPU_EXCLUSIVE:
2842 return is_cpu_exclusive(cs);
2843 case FILE_MEM_EXCLUSIVE:
2844 return is_mem_exclusive(cs);
2845 case FILE_MEM_HARDWALL:
2846 return is_mem_hardwall(cs);
2847 case FILE_SCHED_LOAD_BALANCE:
2848 return is_sched_load_balance(cs);
2849 case FILE_MEMORY_MIGRATE:
2850 return is_memory_migrate(cs);
2851 case FILE_MEMORY_PRESSURE_ENABLED:
2852 return cpuset_memory_pressure_enabled;
2853 case FILE_MEMORY_PRESSURE:
2854 return fmeter_getrate(&cs->fmeter);
2855 case FILE_SPREAD_PAGE:
2856 return is_spread_page(cs);
2857 case FILE_SPREAD_SLAB:
2858 return is_spread_slab(cs);
2863 /* Unreachable but makes gcc happy */
2867 static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
2869 struct cpuset *cs = css_cs(css);
2870 cpuset_filetype_t type = cft->private;
2872 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2873 return cs->relax_domain_level;
2878 /* Unreachable but makes gcc happy */
2882 static int sched_partition_show(struct seq_file *seq, void *v)
2884 struct cpuset *cs = css_cs(seq_css(seq));
2885 const char *err, *type = NULL;
2887 switch (cs->partition_root_state) {
2889 seq_puts(seq, "root\n");
2892 seq_puts(seq, "isolated\n");
2895 seq_puts(seq, "member\n");
2897 case PRS_INVALID_ROOT:
2900 case PRS_INVALID_ISOLATED:
2903 err = perr_strings[READ_ONCE(cs->prs_err)];
2905 seq_printf(seq, "%s invalid (%s)\n", type, err);
2907 seq_printf(seq, "%s invalid\n", type);
2913 static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
2914 size_t nbytes, loff_t off)
2916 struct cpuset *cs = css_cs(of_css(of));
2918 int retval = -ENODEV;
2920 buf = strstrip(buf);
2923 * Convert "root", "member" and "isolated" to their respective partition root states.
2925 if (!strcmp(buf, "root"))
2927 else if (!strcmp(buf, "member"))
2929 else if (!strcmp(buf, "isolated"))
2936 percpu_down_write(&cpuset_rwsem);
2937 if (!is_cpuset_online(cs))
2940 retval = update_prstate(cs, val);
2942 percpu_up_write(&cpuset_rwsem);
2945 return retval ?: nbytes;
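/*
 * Editorial usage sketch (not part of the upstream source; the cgroup2
 * mount point and the cgroup name "A" are assumptions for illustration):
 *
 *	# echo +cpuset > /sys/fs/cgroup/cgroup.subtree_control
 *	# mkdir /sys/fs/cgroup/A
 *	# echo 2-3 > /sys/fs/cgroup/A/cpuset.cpus
 *	# echo root > /sys/fs/cgroup/A/cpuset.cpus.partition
 *	# cat /sys/fs/cgroup/A/cpuset.cpus.partition
 *	root
 *
 * The writes land in cpuset_write_resmask() and sched_partition_write();
 * the read goes through sched_partition_show(), which reports "root",
 * "isolated", "member", or the "... invalid (...)" form built from
 * perr_strings[] when the partition has been invalidated.
 */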
2949 * for the common functions, 'private' gives the type of file
2952 static struct cftype legacy_files[] = {
2955 .seq_show = cpuset_common_seq_show,
2956 .write = cpuset_write_resmask,
2957 .max_write_len = (100U + 6 * NR_CPUS),
2958 .private = FILE_CPULIST,
2963 .seq_show = cpuset_common_seq_show,
2964 .write = cpuset_write_resmask,
2965 .max_write_len = (100U + 6 * MAX_NUMNODES),
2966 .private = FILE_MEMLIST,
2970 .name = "effective_cpus",
2971 .seq_show = cpuset_common_seq_show,
2972 .private = FILE_EFFECTIVE_CPULIST,
2976 .name = "effective_mems",
2977 .seq_show = cpuset_common_seq_show,
2978 .private = FILE_EFFECTIVE_MEMLIST,
2982 .name = "cpu_exclusive",
2983 .read_u64 = cpuset_read_u64,
2984 .write_u64 = cpuset_write_u64,
2985 .private = FILE_CPU_EXCLUSIVE,
2989 .name = "mem_exclusive",
2990 .read_u64 = cpuset_read_u64,
2991 .write_u64 = cpuset_write_u64,
2992 .private = FILE_MEM_EXCLUSIVE,
2996 .name = "mem_hardwall",
2997 .read_u64 = cpuset_read_u64,
2998 .write_u64 = cpuset_write_u64,
2999 .private = FILE_MEM_HARDWALL,
3003 .name = "sched_load_balance",
3004 .read_u64 = cpuset_read_u64,
3005 .write_u64 = cpuset_write_u64,
3006 .private = FILE_SCHED_LOAD_BALANCE,
3010 .name = "sched_relax_domain_level",
3011 .read_s64 = cpuset_read_s64,
3012 .write_s64 = cpuset_write_s64,
3013 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
3017 .name = "memory_migrate",
3018 .read_u64 = cpuset_read_u64,
3019 .write_u64 = cpuset_write_u64,
3020 .private = FILE_MEMORY_MIGRATE,
3024 .name = "memory_pressure",
3025 .read_u64 = cpuset_read_u64,
3026 .private = FILE_MEMORY_PRESSURE,
3030 .name = "memory_spread_page",
3031 .read_u64 = cpuset_read_u64,
3032 .write_u64 = cpuset_write_u64,
3033 .private = FILE_SPREAD_PAGE,
3037 .name = "memory_spread_slab",
3038 .read_u64 = cpuset_read_u64,
3039 .write_u64 = cpuset_write_u64,
3040 .private = FILE_SPREAD_SLAB,
3044 .name = "memory_pressure_enabled",
3045 .flags = CFTYPE_ONLY_ON_ROOT,
3046 .read_u64 = cpuset_read_u64,
3047 .write_u64 = cpuset_write_u64,
3048 .private = FILE_MEMORY_PRESSURE_ENABLED,
3055 * This is currently a minimal set for the default hierarchy. It can be
3056 * expanded later on by migrating more features and control files from v1.
3058 static struct cftype dfl_files[] = {
3061 .seq_show = cpuset_common_seq_show,
3062 .write = cpuset_write_resmask,
3063 .max_write_len = (100U + 6 * NR_CPUS),
3064 .private = FILE_CPULIST,
3065 .flags = CFTYPE_NOT_ON_ROOT,
3070 .seq_show = cpuset_common_seq_show,
3071 .write = cpuset_write_resmask,
3072 .max_write_len = (100U + 6 * MAX_NUMNODES),
3073 .private = FILE_MEMLIST,
3074 .flags = CFTYPE_NOT_ON_ROOT,
3078 .name = "cpus.effective",
3079 .seq_show = cpuset_common_seq_show,
3080 .private = FILE_EFFECTIVE_CPULIST,
3084 .name = "mems.effective",
3085 .seq_show = cpuset_common_seq_show,
3086 .private = FILE_EFFECTIVE_MEMLIST,
3090 .name = "cpus.partition",
3091 .seq_show = sched_partition_show,
3092 .write = sched_partition_write,
3093 .private = FILE_PARTITION_ROOT,
3094 .flags = CFTYPE_NOT_ON_ROOT,
3095 .file_offset = offsetof(struct cpuset, partition_file),
3099 .name = "cpus.subpartitions",
3100 .seq_show = cpuset_common_seq_show,
3101 .private = FILE_SUBPARTS_CPULIST,
3102 .flags = CFTYPE_DEBUG,
3110 * cpuset_css_alloc - Allocate a cpuset css
3111 * @parent_css: Parent css of the control group that the new cpuset will be part of
3113 * Return: cpuset css on success, ERR_PTR(-ENOMEM) on failure.
3115 * Allocate and initialize a new cpuset css for a non-NULL @parent_css; return the
3116 * top cpuset css otherwise.
3118 static struct cgroup_subsys_state *
3119 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
3124 return &top_cpuset.css;
3126 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
3128 return ERR_PTR(-ENOMEM);
3130 if (alloc_cpumasks(cs, NULL)) {
3132 return ERR_PTR(-ENOMEM);
3135 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3136 nodes_clear(cs->mems_allowed);
3137 nodes_clear(cs->effective_mems);
3138 fmeter_init(&cs->fmeter);
3139 cs->relax_domain_level = -1;
3141 /* Set CS_MEMORY_MIGRATE for default hierarchy */
3142 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
3143 __set_bit(CS_MEMORY_MIGRATE, &cs->flags);
3148 static int cpuset_css_online(struct cgroup_subsys_state *css)
3150 struct cpuset *cs = css_cs(css);
3151 struct cpuset *parent = parent_cs(cs);
3152 struct cpuset *tmp_cs;
3153 struct cgroup_subsys_state *pos_css;
3159 percpu_down_write(&cpuset_rwsem);
3161 set_bit(CS_ONLINE, &cs->flags);
3162 if (is_spread_page(parent))
3163 set_bit(CS_SPREAD_PAGE, &cs->flags);
3164 if (is_spread_slab(parent))
3165 set_bit(CS_SPREAD_SLAB, &cs->flags);
3169 spin_lock_irq(&callback_lock);
3170 if (is_in_v2_mode()) {
3171 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
3172 cs->effective_mems = parent->effective_mems;
3173 cs->use_parent_ecpus = true;
3174 parent->child_ecpus_count++;
3176 spin_unlock_irq(&callback_lock);
3178 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
3182 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
3183 * set. This flag handling is implemented in cgroup core for
3184 * historical reasons - the flag may be specified during mount.
3186 * Currently, if any sibling cpusets have exclusive cpus or mem, we
3187 * refuse to clone the configuration - thereby refusing to admit the
3188 * task, and as a result refusing the sys_unshare() or
3189 * clone() which initiated it. If this becomes a problem for some
3190 * users who wish to allow that scenario, then this could be
3191 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
3192 * (and likewise for mems) to the new cgroup.
3195 cpuset_for_each_child(tmp_cs, pos_css, parent) {
3196 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
3203 spin_lock_irq(&callback_lock);
3204 cs->mems_allowed = parent->mems_allowed;
3205 cs->effective_mems = parent->mems_allowed;
3206 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
3207 cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
3208 spin_unlock_irq(&callback_lock);
3210 percpu_up_write(&cpuset_rwsem);
3216 * If the cpuset being removed has its flag 'sched_load_balance'
3217 * enabled, then simulate turning sched_load_balance off, which
3218 * will call rebuild_sched_domains_locked(). That is not needed
3219 * in the default hierarchy where only changes in partition
3220 * will cause repartitioning.
3222 * If the cpuset has the 'sched.partition' flag enabled, simulate
3223 * turning 'sched.partition' off.
3226 static void cpuset_css_offline(struct cgroup_subsys_state *css)
3228 struct cpuset *cs = css_cs(css);
3231 percpu_down_write(&cpuset_rwsem);
3233 if (is_partition_valid(cs))
3234 update_prstate(cs, 0);
3236 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
3237 is_sched_load_balance(cs))
3238 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
3240 if (cs->use_parent_ecpus) {
3241 struct cpuset *parent = parent_cs(cs);
3243 cs->use_parent_ecpus = false;
3244 parent->child_ecpus_count--;
3248 clear_bit(CS_ONLINE, &cs->flags);
3250 percpu_up_write(&cpuset_rwsem);
3254 static void cpuset_css_free(struct cgroup_subsys_state *css)
3256 struct cpuset *cs = css_cs(css);
3261 static void cpuset_bind(struct cgroup_subsys_state *root_css)
3263 percpu_down_write(&cpuset_rwsem);
3264 spin_lock_irq(&callback_lock);
3266 if (is_in_v2_mode()) {
3267 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
3268 top_cpuset.mems_allowed = node_possible_map;
3270 cpumask_copy(top_cpuset.cpus_allowed,
3271 top_cpuset.effective_cpus);
3272 top_cpuset.mems_allowed = top_cpuset.effective_mems;
3275 spin_unlock_irq(&callback_lock);
3276 percpu_up_write(&cpuset_rwsem);
3280 * In case the child is cloned into a cpuset different from its parent,
3281 * additional checks are done to see if the move is allowed.
3283 static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
3285 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3290 same_cs = (cs == task_cs(current));
3296 lockdep_assert_held(&cgroup_mutex);
3297 percpu_down_write(&cpuset_rwsem);
3299 /* Check to see if task is allowed in the cpuset */
3300 ret = cpuset_can_attach_check(cs);
3304 ret = task_can_attach(task, cs->effective_cpus);
3308 ret = security_task_setscheduler(task);
3313 * Mark attach is in progress. This makes validate_change() fail
3314 * changes which zero cpus/mems_allowed.
3316 cs->attach_in_progress++;
3318 percpu_up_write(&cpuset_rwsem);
3322 static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
3324 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3328 same_cs = (cs == task_cs(current));
3334 percpu_down_write(&cpuset_rwsem);
3335 cs->attach_in_progress--;
3336 if (!cs->attach_in_progress)
3337 wake_up(&cpuset_attach_wq);
3338 percpu_up_write(&cpuset_rwsem);
3342 * Make sure the new task conforms to the current state of its parent,
3343 * which could have been changed by cpuset just after it inherits the
3344 * state from the parent and before it sits on the cgroup's task list.
3346 static void cpuset_fork(struct task_struct *task)
3353 same_cs = (cs == task_cs(current));
3357 if (cs == &top_cpuset)
3360 set_cpus_allowed_ptr(task, current->cpus_ptr);
3361 task->mems_allowed = current->mems_allowed;
3365 /* CLONE_INTO_CGROUP */
3366 percpu_down_write(&cpuset_rwsem);
3367 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3368 cpuset_attach_task(cs, task);
3370 cs->attach_in_progress--;
3371 if (!cs->attach_in_progress)
3372 wake_up(&cpuset_attach_wq);
3374 percpu_up_write(&cpuset_rwsem);
3377 struct cgroup_subsys cpuset_cgrp_subsys = {
3378 .css_alloc = cpuset_css_alloc,
3379 .css_online = cpuset_css_online,
3380 .css_offline = cpuset_css_offline,
3381 .css_free = cpuset_css_free,
3382 .can_attach = cpuset_can_attach,
3383 .cancel_attach = cpuset_cancel_attach,
3384 .attach = cpuset_attach,
3385 .post_attach = cpuset_post_attach,
3386 .bind = cpuset_bind,
3387 .can_fork = cpuset_can_fork,
3388 .cancel_fork = cpuset_cancel_fork,
3389 .fork = cpuset_fork,
3390 .legacy_cftypes = legacy_files,
3391 .dfl_cftypes = dfl_files,
3397 * cpuset_init - initialize cpusets at system boot
3399 * Description: Initialize top_cpuset
3402 int __init cpuset_init(void)
3404 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
3405 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
3406 BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
3408 cpumask_setall(top_cpuset.cpus_allowed);
3409 nodes_setall(top_cpuset.mems_allowed);
3410 cpumask_setall(top_cpuset.effective_cpus);
3411 nodes_setall(top_cpuset.effective_mems);
3413 fmeter_init(&top_cpuset.fmeter);
3414 set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
3415 top_cpuset.relax_domain_level = -1;
3417 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
3423 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
3424 * or memory nodes, we need to walk over the cpuset hierarchy,
3425 * removing that CPU or node from all cpusets. If this removes the
3426 * last CPU or node from a cpuset, then move the tasks in the empty
3427 * cpuset to its next-highest non-empty parent.
3429 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
3431 struct cpuset *parent;
3434 * Find its next-highest non-empty parent (the top cpuset
3435 * has online cpus, so it can't be empty).
3437 parent = parent_cs(cs);
3438 while (cpumask_empty(parent->cpus_allowed) ||
3439 nodes_empty(parent->mems_allowed))
3440 parent = parent_cs(parent);
3442 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
3443 pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
3444 pr_cont_cgroup_name(cs->css.cgroup);
3450 hotplug_update_tasks_legacy(struct cpuset *cs,
3451 struct cpumask *new_cpus, nodemask_t *new_mems,
3452 bool cpus_updated, bool mems_updated)
3456 spin_lock_irq(&callback_lock);
3457 cpumask_copy(cs->cpus_allowed, new_cpus);
3458 cpumask_copy(cs->effective_cpus, new_cpus);
3459 cs->mems_allowed = *new_mems;
3460 cs->effective_mems = *new_mems;
3461 spin_unlock_irq(&callback_lock);
3464 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
3465 * as the tasks will be migrated to an ancestor.
3467 if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
3468 update_tasks_cpumask(cs, new_cpus);
3469 if (mems_updated && !nodes_empty(cs->mems_allowed))
3470 update_tasks_nodemask(cs);
3472 is_empty = cpumask_empty(cs->cpus_allowed) ||
3473 nodes_empty(cs->mems_allowed);
3475 percpu_up_write(&cpuset_rwsem);
3478 * Move tasks to the nearest ancestor with execution resources.
3479 * This is a full cgroup operation which will also call back into
3480 * cpuset. Should be done outside any lock.
3483 remove_tasks_in_empty_cpuset(cs);
3485 percpu_down_write(&cpuset_rwsem);
3489 hotplug_update_tasks(struct cpuset *cs,
3490 struct cpumask *new_cpus, nodemask_t *new_mems,
3491 bool cpus_updated, bool mems_updated)
3493 /* A partition root is allowed to have empty effective cpus */
3494 if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
3495 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
3496 if (nodes_empty(*new_mems))
3497 *new_mems = parent_cs(cs)->effective_mems;
3499 spin_lock_irq(&callback_lock);
3500 cpumask_copy(cs->effective_cpus, new_cpus);
3501 cs->effective_mems = *new_mems;
3502 spin_unlock_irq(&callback_lock);
3505 update_tasks_cpumask(cs, new_cpus);
3507 update_tasks_nodemask(cs);
3510 static bool force_rebuild;
3512 void cpuset_force_rebuild(void)
3514 force_rebuild = true;
3518 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3519 * @cs: cpuset of interest
3520 * @tmp: the tmpmasks structure pointer
3522 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3523 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3524 * all its tasks are moved to the nearest ancestor with both resources.
3526 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3528 static cpumask_t new_cpus;
3529 static nodemask_t new_mems;
3532 struct cpuset *parent;
3534 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3536 percpu_down_write(&cpuset_rwsem);
3539 * We have raced with task attaching. We wait until attaching
3540 * is finished, so we won't attach a task to an empty cpuset.
3542 if (cs->attach_in_progress) {
3543 percpu_up_write(&cpuset_rwsem);
3547 parent = parent_cs(cs);
3548 compute_effective_cpumask(&new_cpus, cs, parent);
3549 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3551 if (cs->nr_subparts_cpus)
3553 * Make sure that CPUs allocated to child partitions
3554 * do not show up in effective_cpus.
3556 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus);
3558 if (!tmp || !cs->partition_root_state)
3562 * In the unlikely event that a partition root has empty
3563 * effective_cpus with tasks, we will have to invalidate child
3564 * partitions, if present, by setting nr_subparts_cpus to 0 to
3565 * reclaim their cpus.
3567 if (cs->nr_subparts_cpus && is_partition_valid(cs) &&
3568 cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)) {
3569 spin_lock_irq(&callback_lock);
3570 cs->nr_subparts_cpus = 0;
3571 cpumask_clear(cs->subparts_cpus);
3572 spin_unlock_irq(&callback_lock);
3573 compute_effective_cpumask(&new_cpus, cs, parent);
3577 * Force the partition to become invalid if either one of
3578 * the following conditions holds:
3579 * 1) empty effective cpus but not a valid empty partition.
3580 * 2) parent is invalid or doesn't grant any cpus to child partitions.
3583 if (is_partition_valid(cs) && (!parent->nr_subparts_cpus ||
3584 (cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)))) {
3585 int old_prs, parent_prs;
3587 update_parent_subparts_cpumask(cs, partcmd_disable, NULL, tmp);
3588 if (cs->nr_subparts_cpus) {
3589 spin_lock_irq(&callback_lock);
3590 cs->nr_subparts_cpus = 0;
3591 cpumask_clear(cs->subparts_cpus);
3592 spin_unlock_irq(&callback_lock);
3593 compute_effective_cpumask(&new_cpus, cs, parent);
3596 old_prs = cs->partition_root_state;
3597 parent_prs = parent->partition_root_state;
3598 if (is_partition_valid(cs)) {
3599 spin_lock_irq(&callback_lock);
3600 make_partition_invalid(cs);
3601 spin_unlock_irq(&callback_lock);
3602 if (is_prs_invalid(parent_prs))
3603 WRITE_ONCE(cs->prs_err, PERR_INVPARENT);
3604 else if (!parent_prs)
3605 WRITE_ONCE(cs->prs_err, PERR_NOTPART);
3607 WRITE_ONCE(cs->prs_err, PERR_HOTPLUG);
3608 notify_partition_change(cs, old_prs);
3610 cpuset_force_rebuild();
3614 * On the other hand, an invalid partition root may be transitioned
3615 * back to a regular one.
3617 else if (is_partition_valid(parent) && is_partition_invalid(cs)) {
3618 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp);
3619 if (is_partition_valid(cs))
3620 cpuset_force_rebuild();
3624 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3625 mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3626 if (!cpus_updated && !mems_updated)
3627 goto unlock; /* Hotplug doesn't affect this cpuset */
3630 check_insane_mems_config(&new_mems);
3632 if (is_in_v2_mode())
3633 hotplug_update_tasks(cs, &new_cpus, &new_mems,
3634 cpus_updated, mems_updated);
3636 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
3637 cpus_updated, mems_updated);
3640 percpu_up_write(&cpuset_rwsem);
3644 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
3646 * This function is called after either CPU or memory configuration has
3647 * changed and updates cpuset accordingly. The top_cpuset is always
3648 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3649 * order to make cpusets transparent (of no effect) on systems that are
3650 * actively using CPU hotplug but making no active use of cpusets.
3652 * Non-root cpusets are only affected by offlining. If any CPUs or memory
3653 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on all descendants.
3656 * Note that CPU offlining during suspend is ignored. We don't modify
3657 * cpusets across suspend/resume cycles at all.
3659 static void cpuset_hotplug_workfn(struct work_struct *work)
3661 static cpumask_t new_cpus;
3662 static nodemask_t new_mems;
3663 bool cpus_updated, mems_updated;
3664 bool on_dfl = is_in_v2_mode();
3665 struct tmpmasks tmp, *ptmp = NULL;
3667 if (on_dfl && !alloc_cpumasks(NULL, &tmp))
3670 percpu_down_write(&cpuset_rwsem);
3672 /* fetch the available cpus/mems and find out which changed how */
3673 cpumask_copy(&new_cpus, cpu_active_mask);
3674 new_mems = node_states[N_MEMORY];
3677 * If subparts_cpus is populated, it is likely that the check below
3678 * will produce a false positive on cpus_updated when the cpu list
3679 * isn't changed. It is extra work, but it is better to be safe.
3681 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
3682 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3685 * In the rare case that hotplug removes all the cpus in subparts_cpus,
3686 * we assume that the cpus have been updated.
3688 if (!cpus_updated && top_cpuset.nr_subparts_cpus)
3689 cpus_updated = true;
3691 /* synchronize cpus_allowed to cpu_active_mask */
3693 spin_lock_irq(&callback_lock);
3695 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3697 * Make sure that CPUs allocated to child partitions
3698 * do not show up in effective_cpus. If no CPU is left,
3699 * we clear the subparts_cpus & let the child partitions
3700 * fight for the CPUs again.
3702 if (top_cpuset.nr_subparts_cpus) {
3703 if (cpumask_subset(&new_cpus,
3704 top_cpuset.subparts_cpus)) {
3705 top_cpuset.nr_subparts_cpus = 0;
3706 cpumask_clear(top_cpuset.subparts_cpus);
3708 cpumask_andnot(&new_cpus, &new_cpus,
3709 top_cpuset.subparts_cpus);
3712 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3713 spin_unlock_irq(&callback_lock);
3714 /* we don't mess with cpumasks of tasks in top_cpuset */
3717 /* synchronize mems_allowed to N_MEMORY */
3719 spin_lock_irq(&callback_lock);
3721 top_cpuset.mems_allowed = new_mems;
3722 top_cpuset.effective_mems = new_mems;
3723 spin_unlock_irq(&callback_lock);
3724 update_tasks_nodemask(&top_cpuset);
3727 percpu_up_write(&cpuset_rwsem);
3729 /* if cpus or mems changed, we need to propagate to descendants */
3730 if (cpus_updated || mems_updated) {
3732 struct cgroup_subsys_state *pos_css;
3735 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3736 if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3740 cpuset_hotplug_update_tasks(cs, ptmp);
3748 /* rebuild sched domains if cpus_allowed has changed */
3749 if (cpus_updated || force_rebuild) {
3750 force_rebuild = false;
3751 rebuild_sched_domains();
3754 free_cpumasks(NULL, ptmp);
3757 void cpuset_update_active_cpus(void)
3760 * We're inside the cpu hotplug critical region, which usually nests
3761 * inside cgroup synchronization. Bounce actual hotplug processing
3762 * to a work item to avoid reverse locking order.
3764 schedule_work(&cpuset_hotplug_work);
3767 void cpuset_wait_for_hotplug(void)
3769 flush_work(&cpuset_hotplug_work);
3773 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3774 * Call this routine anytime after node_states[N_MEMORY] changes.
3775 * See cpuset_update_active_cpus() for CPU hotplug handling.
3777 static int cpuset_track_online_nodes(struct notifier_block *self,
3778 unsigned long action, void *arg)
3780 schedule_work(&cpuset_hotplug_work);
3785 * cpuset_init_smp - initialize cpus_allowed
3787 * Description: Finish top cpuset after cpu, node maps are initialized
3789 void __init cpuset_init_smp(void)
3792 * cpus_allowed/mems_allowed set to v2 values in the initial
3793 * cpuset_bind() call will be reset to v1 values in another
3794 * cpuset_bind() call when v1 cpuset is mounted.
3796 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
3798 cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
3799 top_cpuset.effective_mems = node_states[N_MEMORY];
3801 hotplug_memory_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
3803 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
3804 BUG_ON(!cpuset_migrate_mm_wq);
3808 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
3809 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3810 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
3812 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
3813 * attached to the specified @tsk. Guaranteed to return some non-empty
3814 * subset of cpu_online_mask, even if this means going outside the
3815 * task's cpuset, except when the task is in the top cpuset.
3818 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
3820 unsigned long flags;
3823 spin_lock_irqsave(&callback_lock, flags);
3827 if (cs != &top_cpuset)
3828 guarantee_online_cpus(tsk, pmask);
3830 * Tasks in the top cpuset won't get updates to their cpumasks
3831 * when a hotplug online/offline event happens. So we include all
3832 * offline cpus in the allowed cpu list.
3834 if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
3835 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
3838 * We first exclude cpus allocated to partitions. If there is no
3839 * allowable online cpu left, we fall back to all possible cpus.
3841 cpumask_andnot(pmask, possible_mask, top_cpuset.subparts_cpus);
3842 if (!cpumask_intersects(pmask, cpu_online_mask))
3843 cpumask_copy(pmask, possible_mask);
3847 spin_unlock_irqrestore(&callback_lock, flags);
3851 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
3852 * @tsk: pointer to task_struct with which the scheduler is struggling
3854 * Description: In the case that the scheduler cannot find an allowed cpu in
3855 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
3856 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
3857 * which will not contain a sane cpumask during cases such as cpu hotplugging.
3858 * This is the absolute last resort for the scheduler and it is only used if
3859 * _every_ other avenue has been traveled.
3861 * Returns true if the affinity of @tsk was changed, false otherwise.
3864 bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
3866 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
3867 const struct cpumask *cs_mask;
3868 bool changed = false;
3871 cs_mask = task_cs(tsk)->cpus_allowed;
3872 if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
3873 do_set_cpus_allowed(tsk, cs_mask);
3879 * We own tsk->cpus_allowed, nobody can change it under us.
3881 * But we used cs && cs->cpus_allowed lockless and thus can
3882 * race with cgroup_attach_task() or update_cpumask() and get
3883 * the wrong tsk->cpus_allowed. However, both cases imply the
3884 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
3885 * which takes task_rq_lock().
3887 * If we are called after it dropped the lock we must see all
3888 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
3889 * set any mask even if it is not right from task_cs() pov,
3890 * the pending set_cpus_allowed_ptr() will fix things.
3892 * select_fallback_rq() will fix things up and set cpu_possible_mask if necessary.
3898 void __init cpuset_init_current_mems_allowed(void)
3900 nodes_setall(current->mems_allowed);
3904 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
3905 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
3907 * Description: Returns the nodemask_t mems_allowed of the cpuset
3908 * attached to the specified @tsk. Guaranteed to return some non-empty
3909 * subset of node_states[N_MEMORY], even if this means going outside the task's cpuset.
3913 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
3916 unsigned long flags;
3918 spin_lock_irqsave(&callback_lock, flags);
3920 guarantee_online_mems(task_cs(tsk), &mask);
3922 spin_unlock_irqrestore(&callback_lock, flags);
3928 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
3929 * @nodemask: the nodemask to be checked
3931 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
3933 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
3935 return nodes_intersects(*nodemask, current->mems_allowed);
3939 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
3940 * mem_hardwall ancestor to the specified cpuset. Call holding
3941 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall
3942 * (an unusual configuration), then returns the root cpuset.
3944 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
3946 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
3952 * cpuset_node_allowed - Can we allocate on a memory node?
3953 * @node: is this an allowed node?
3954 * @gfp_mask: memory allocation flags
3956 * If we're in interrupt, yes, we can always allocate. If @node is set in
3957 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
3958 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
3959 * yes. If current has access to memory reserves as an oom victim, yes.
3962 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
3963 * and do not allow allocations outside the current task's cpuset
3964 * unless the task has been OOM killed.
3965 * GFP_KERNEL allocations are not so marked, so can escape to the
3966 * nearest enclosing hardwalled ancestor cpuset.
3968 * Scanning up parent cpusets requires callback_lock. The
3969 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
3970 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
3971 * current task's mems_allowed came up empty on the first pass over
3972 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
3973 * cpuset are short of memory, might require taking the callback_lock.
3975 * The first call here from mm/page_alloc:get_page_from_freelist()
3976 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
3977 * so no allocation on a node outside the cpuset is allowed (unless
3978 * in interrupt, of course).
3980 * The second pass through get_page_from_freelist() doesn't even call
3981 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
3982 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
3983 * in alloc_flags. That logic and the checks below have the combined effect that:
3985 * in_interrupt - any node ok (current task context irrelevant)
3986 * GFP_ATOMIC - any node ok
3987 * tsk_is_oom_victim - any node ok
3988 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
3989 * GFP_USER - only nodes in the current task's mems_allowed ok.
3991 bool cpuset_node_allowed(int node, gfp_t gfp_mask)
3993 struct cpuset *cs; /* current cpuset ancestors */
3994 bool allowed; /* is allocation in zone z allowed? */
3995 unsigned long flags;
3999 if (node_isset(node, current->mems_allowed))
4002 * Allow tasks that have access to memory reserves because they have
4003 * been OOM killed to get memory anywhere.
4005 if (unlikely(tsk_is_oom_victim(current)))
4007 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
4010 if (current->flags & PF_EXITING) /* Let dying task have memory */
4013 /* Not hardwall and node outside mems_allowed: scan up cpusets */
4014 spin_lock_irqsave(&callback_lock, flags);
4017 cs = nearest_hardwall_ancestor(task_cs(current));
4018 allowed = node_isset(node, cs->mems_allowed);
4021 spin_unlock_irqrestore(&callback_lock, flags);
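/*
 * Editorial example (illustrative only, not part of the upstream source):
 * suppose current's cpuset allows only node 1, its nearest mem_hardwall
 * ancestor allows nodes 0-3, and node 1 is out of memory.  A GFP_KERNEL
 * allocation (no __GFP_HARDWALL) may then be satisfied from nodes 0-3 via
 * the ancestor scan above, while a GFP_USER allocation (__GFP_HARDWALL
 * set) stops at the node_isset() check and stays confined to node 1.
 */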
4026 * cpuset_spread_node() - On which node to begin search for a page
4028 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
4029 * tasks in a cpuset with is_spread_page or is_spread_slab set),
4030 * and if the memory allocation used cpuset_mem_spread_node()
4031 * to determine on which node to start looking, as it will for
4032 * certain page cache or slab cache pages such as those used for file
4033 * system buffers and inode caches, then instead of starting on the
4034 * local node to look for a free page, rather spread the starting
4035 * node around the task's mems_allowed nodes.
4037 * We don't have to worry about the returned node being offline
4038 * because "it can't happen", and even if it did, it would be ok.
4040 * The routines calling guarantee_online_mems() are careful to
4041 * only set nodes in task->mems_allowed that are online. So it
4042 * should not be possible for the following code to return an
4043 * offline node. But if it did, that would be ok, as this routine
4044 * is not returning the node where the allocation must be, only
4045 * the node where the search should start. The zonelist passed to
4046 * __alloc_pages() will include all nodes. If the slab allocator
4047 * is passed an offline node, it will fall back to the local node.
4048 * See kmem_cache_alloc_node().
4050 static int cpuset_spread_node(int *rotor)
4052 return *rotor = next_node_in(*rotor, current->mems_allowed);
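/*
 * Editorial illustration (not part of the upstream source): with
 * current->mems_allowed = { 0, 2, 3 } and a rotor currently at 2,
 * successive calls return 3, 0, 2, 3, and so on; the starting node for
 * page and slab searches is thus rotated round-robin over the allowed nodes.
 */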
4056 * cpuset_mem_spread_node() - On which node to begin search for a file page
4058 int cpuset_mem_spread_node(void)
4060 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
4061 current->cpuset_mem_spread_rotor =
4062 node_random(&current->mems_allowed);
4064 return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
4068 * cpuset_slab_spread_node() - On which node to begin search for a slab page
4070 int cpuset_slab_spread_node(void)
4072 if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
4073 current->cpuset_slab_spread_rotor =
4074 node_random(&current->mems_allowed);
4076 return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
4078 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
4081 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
4082 * @tsk1: pointer to task_struct of some task.
4083 * @tsk2: pointer to task_struct of some other task.
4085 * Description: Return true if @tsk1's mems_allowed intersects the
4086 * mems_allowed of @tsk2. Used by the OOM killer to determine if
4087 * either task's memory usage might impact the memory available to the other.
4091 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
4092 const struct task_struct *tsk2)
4094 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
4098 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
4100 * Description: Prints current's name, cpuset name, and cached copy of its
4101 * mems_allowed to the kernel log.
4103 void cpuset_print_current_mems_allowed(void)
4105 struct cgroup *cgrp;
4109 cgrp = task_cs(current)->css.cgroup;
4110 pr_cont(",cpuset=");
4111 pr_cont_cgroup_name(cgrp);
4112 pr_cont(",mems_allowed=%*pbl",
4113 nodemask_pr_args(&current->mems_allowed));
4119 * Collection of memory_pressure is suppressed unless
4120 * this flag is enabled by writing "1" to the special
4121 * cpuset file 'memory_pressure_enabled' in the root cpuset.
4124 int cpuset_memory_pressure_enabled __read_mostly;
4127 * __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
4129 * Keep a running average of the rate of synchronous (direct)
4130 * page reclaim efforts initiated by tasks in each cpuset.
4132 * This represents the rate at which some task in the cpuset
4133 * ran low on memory on all nodes it was allowed to use, and
4134 * had to enter the kernel's page reclaim code in an effort to
4135 * create more free memory by tossing clean pages or swapping
4136 * or writing dirty pages.
4138 * Displayed to user space in the per-cpuset read-only file
4139 * "memory_pressure". Value displayed is an integer
4140 * representing the recent rate of entry into the synchronous
4141 * (direct) page reclaim by any task attached to the cpuset.
4144 void __cpuset_memory_pressure_bump(void)
4147 fmeter_markevent(&task_cs(current)->fmeter);
4151 #ifdef CONFIG_PROC_PID_CPUSET
4153 * proc_cpuset_show()
4154 * - Print the task's cpuset path into seq_file.
4155 * - Used for /proc/<pid>/cpuset.
4156 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
4157 * doesn't really matter if tsk->cpuset changes after we read it,
4158 * and we take cpuset_rwsem, keeping cpuset_attach() from changing it
4161 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
4162 struct pid *pid, struct task_struct *tsk)
4165 struct cgroup_subsys_state *css;
4169 buf = kmalloc(PATH_MAX, GFP_KERNEL);
4173 css = task_get_css(tsk, cpuset_cgrp_id);
4174 retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
4175 current->nsproxy->cgroup_ns);
4177 if (retval >= PATH_MAX)
4178 retval = -ENAMETOOLONG;
4189 #endif /* CONFIG_PROC_PID_CPUSET */
4191 /* Display task mems_allowed in /proc/<pid>/status file. */
4192 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
4194 seq_printf(m, "Mems_allowed:\t%*pb\n",
4195 nodemask_pr_args(&task->mems_allowed));
4196 seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
4197 nodemask_pr_args(&task->mems_allowed));