/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
extern struct static_key_false cpusets_insane_config_key;

static inline bool cpusets_enabled(void)
{
        return static_branch_unlikely(&cpusets_enabled_key);
}

/*
 * cpuset_inc()/cpuset_dec() flip the two keys in opposite orders so that
 * begin() is enabled before retry() and disabled after it, matching the
 * ordering requirement described above.
 */
static inline void cpuset_inc(void)
{
        static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
        static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
        static_branch_dec_cpuslocked(&cpusets_enabled_key);
        static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

/*
 * This will get enabled whenever a cpuset configuration is considered
 * unsupportable in general. E.g. movable only node which cannot satisfy
 * any non movable allocations (see update_nodemask). Page allocator
 * needs to make additional checks for those configurations and this
 * check is meant to guard those checks without any overhead for sane
 * configurations.
 */
static inline bool cpusets_insane_config(void)
{
        return static_branch_unlikely(&cpusets_insane_config_key);
}
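
/*
 * Illustrative sketch of the intended usage (not a quote of the real
 * callers, which live in the page allocator; config_is_unsupported() is
 * hypothetical): the extra validation stays behind the static branch so
 * that sane configurations pay nothing:
 *
 *      if (cpusets_insane_config() && config_is_unsupported(nodemask))
 *              handle the movable-only-node case;
 */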

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_read_lock(void);
extern void cpuset_read_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
        if (cpusets_enabled())
                return __cpuset_node_allowed(node, gfp_mask);
        return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        if (cpusets_enabled())
                return __cpuset_zone_allowed(z, gfp_mask);
        return true;
}
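
/*
 * Illustrative caller sketch (an assumed usage pattern, not part of this
 * header): the page allocator's zonelist scan skips zones the current
 * cpuset forbids, along the lines of
 *
 *      for_each_zone_zonelist_nodemask(zone, z, zonelist, highidx, nodemask) {
 *              if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
 *                  !__cpuset_zone_allowed(zone, gfp_mask))
 *                      continue;
 *              ... try to allocate from this zone ...
 *      }
 */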

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                          const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()                           \
        do {                                                    \
                if (cpuset_memory_pressure_enabled)             \
                        __cpuset_memory_pressure_bump();        \
        } while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
                                       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
                            struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
        return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
        return task_spread_slab(current);
}

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
        if (!static_branch_unlikely(&cpusets_pre_enable_key))
                return 0;

        return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
        if (!static_branch_unlikely(&cpusets_enabled_key))
                return false;

        return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
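
/*
 * Typical reader-side pattern (an illustrative sketch of the retry loop
 * the comments above describe; everything except the two helpers and
 * cpuset_current_mems_allowed is hypothetical):
 *
 *      unsigned int seq;
 *      nodemask_t nodes;
 *
 *      do {
 *              seq = read_mems_allowed_begin();
 *              nodes = cpuset_current_mems_allowed;
 *              ... make the allocation decision based on nodes ...
 *      } while (read_mems_allowed_retry(seq));
 *
 * With the key-rewrite ordering guaranteed at the top of this file, the
 * loop terminates even when local irqs are disabled around it.
 */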

static inline void set_mems_allowed(nodemask_t nodemask)
{
        unsigned long flags;

        task_lock(current);
        local_irq_save(flags);
        write_seqcount_begin(&current->mems_allowed_seq);
        current->mems_allowed = nodemask;
        write_seqcount_end(&current->mems_allowed_seq);
        local_irq_restore(flags);
        task_unlock(current);
}
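
/*
 * Note on set_mems_allowed() above: task_lock() serializes writers, and
 * disabling irqs across the seqcount write stops a reader in irq context
 * on this CPU from spinning forever on an odd sequence count.
 */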

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline bool cpusets_insane_config(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
        partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_read_lock(void) { }
static inline void cpuset_read_unlock(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
                                       struct cpumask *mask)
{
        cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
        return false;
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
        return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
        return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
        return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                                 const struct task_struct *tsk2)
{
        return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
                                              struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
        return 0;
}

static inline int cpuset_slab_spread_node(void)
{
        return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
        return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
        return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
        return false;
}

static inline void rebuild_sched_domains(void)
{
        partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
        return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
        return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */