// SPDX-License-Identifier: GPL-2.0
/*
 * Scheduler topology setup/handling methods
 */
#include "sched.h"

DEFINE_MUTEX(sched_domains_mutex);

/* Protected by sched_domains_mutex: */
static cpumask_var_t sched_domains_tmpmask;
static cpumask_var_t sched_domains_tmpmask2;

#ifdef CONFIG_SCHED_DEBUG

static int __init sched_debug_setup(char *str)
{
	sched_debug_enabled = true;

	return 0;
}
early_param("sched_debug", sched_debug_setup);

static inline bool sched_debug(void)
{
	return sched_debug_enabled;
}
static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;

	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);
	printk(KERN_CONT "span=%*pbl level=%s\n",
	       cpumask_pr_args(sched_domain_span(sd)), sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
	}
	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		if (!cpumask_weight(sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_span(group));

		printk(KERN_CONT " %d:{ span=%*pbl",
				group->sgc->id,
				cpumask_pr_args(sched_group_span(group)));

		if ((sd->flags & SD_OVERLAP) &&
		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
			printk(KERN_CONT " mask=%*pbl",
				cpumask_pr_args(group_balance_mask(group)));
		}

		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
			printk(KERN_CONT " cap=%lu", group->sgc->capacity);

		if (group == sd->groups && sd->child &&
		    !cpumask_equal(sched_domain_span(sd->child),
				   sched_group_span(group))) {
			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
		}

		printk(KERN_CONT " }");

		group = group->next;

		if (group != sd->groups)
			printk(KERN_CONT ",");

	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
	return 0;
}
static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;

	if (!sched_debug_enabled)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);

	for (;;) {
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
			break;
		level++;
		sd = sd->parent;
		if (!sd)
			break;
	}
}
#else /* !CONFIG_SCHED_DEBUG */

# define sched_debug_enabled 0
# define sched_domain_debug(sd, cpu) do { } while (0)
static inline bool sched_debug(void)
{
	return false;
}
#endif /* CONFIG_SCHED_DEBUG */
static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* Following flags need at least 2 groups */
	if (sd->flags & (SD_BALANCE_NEWIDLE |
			 SD_BALANCE_FORK |
			 SD_BALANCE_EXEC |
			 SD_SHARE_CPUCAPACITY |
			 SD_ASYM_CPUCAPACITY |
			 SD_SHARE_PKG_RESOURCES)) {
		if (sd->groups != sd->groups->next)
			return 0;
	}

	/* Following flags don't use groups */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}
static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if only 1 group in parent */
	if (parent->groups == parent->groups->next) {
		pflags &= ~(SD_BALANCE_NEWIDLE |
			    SD_BALANCE_FORK |
			    SD_BALANCE_EXEC |
			    SD_ASYM_CPUCAPACITY |
			    SD_SHARE_CPUCAPACITY |
			    SD_SHARE_PKG_RESOURCES |
			    SD_PREFER_SIBLING);
		if (nr_node_ids == 1)
			pflags &= ~SD_SERIALIZE;
	}
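	/* Keep the parent if it still has flags the child does not: */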
	if (~cflags & pflags)
		return 0;

	return 1;
}
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
DEFINE_STATIC_KEY_FALSE(sched_energy_present);
unsigned int sysctl_sched_energy_aware = 1;
DEFINE_MUTEX(sched_energy_mutex);
bool sched_energy_update;

#ifdef CONFIG_PROC_SYSCTL
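/*
 * Runtime knob for EAS, exposed as /proc/sys/kernel/sched_energy_aware.
 * Illustrative usage note (not part of the original file):
 *
 *	# echo 0 > /proc/sys/kernel/sched_energy_aware	(disable EAS)
 *	# echo 1 > /proc/sys/kernel/sched_energy_aware	(re-enable EAS)
 *
 * Flipping the value rebuilds the sched domains (and with them the perf
 * domains) under sched_energy_mutex, as the handler below does.
 */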
int sched_energy_aware_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret, state;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write) {
		state = static_branch_unlikely(&sched_energy_present);
		if (state != sysctl_sched_energy_aware) {
			mutex_lock(&sched_energy_mutex);
			sched_energy_update = 1;
			rebuild_sched_domains();
			sched_energy_update = 0;
			mutex_unlock(&sched_energy_mutex);
		}
	}

	return ret;
}
#endif
static void free_pd(struct perf_domain *pd)
{
	struct perf_domain *tmp;

	while (pd) {
		tmp = pd->next;
		kfree(pd);
		pd = tmp;
	}
}

static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
{
	while (pd) {
		if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
			return pd;
		pd = pd->next;
	}

	return NULL;
}

static struct perf_domain *pd_init(int cpu)
{
	struct em_perf_domain *obj = em_cpu_get(cpu);
	struct perf_domain *pd;

	if (!obj) {
		if (sched_debug())
			pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
		return NULL;
	}

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return NULL;
	pd->em_pd = obj;

	return pd;
}

static void perf_domain_debug(const struct cpumask *cpu_map,
						struct perf_domain *pd)
{
	if (!sched_debug() || !pd)
		return;

	printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));

	while (pd) {
		printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_pstate=%d }",
				cpumask_first(perf_domain_span(pd)),
				cpumask_pr_args(perf_domain_span(pd)),
				em_pd_nr_perf_states(pd->em_pd));
		pd = pd->next;
	}

	printk(KERN_CONT "\n");
}

static void destroy_perf_domain_rcu(struct rcu_head *rp)
{
	struct perf_domain *pd;

	pd = container_of(rp, struct perf_domain, rcu);
	free_pd(pd);
}

static void sched_energy_set(bool has_eas)
{
	if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
		if (sched_debug())
			pr_info("%s: stopping EAS\n", __func__);
		static_branch_disable_cpuslocked(&sched_energy_present);
	} else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
		if (sched_debug())
			pr_info("%s: starting EAS\n", __func__);
		static_branch_enable_cpuslocked(&sched_energy_present);
	}
}
/*
 * EAS can be used on a root domain if it meets all the following conditions:
 *    1. an Energy Model (EM) is available;
 *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy;
 *    3. no SMT is detected;
 *    4. the EM complexity is low enough to keep scheduling overheads low;
 *    5. schedutil is driving the frequency of all CPUs of the rd.
 *
 * The complexity of the Energy Model is defined as:
 *
 *              C = nr_pd * (nr_cpus + nr_ps)
 *
 * with parameters defined as:
 *  - nr_pd:    the number of performance domains
 *  - nr_cpus:  the number of CPUs
 *  - nr_ps:    the sum of the number of performance states of all performance
 *              domains (for example, on a system with 2 performance domains,
 *              with 10 performance states each, nr_ps = 2 * 10 = 20).
 *
 * It is generally not a good idea to use such a model in the wake-up path on
 * very complex platforms because of the associated scheduling overheads. The
 * arbitrary constraint below prevents that. It makes EAS usable up to 16 CPUs
 * with per-CPU DVFS and less than 8 performance states each, for example.
 */
#define EM_MAX_COMPLEXITY 2048
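/*
 * Worked example (added for illustration, the numbers are hypothetical):
 * a 16-CPU system with per-CPU DVFS (nr_pd = 16) and 7 performance states
 * per domain gives nr_ps = 16 * 7 = 112, hence:
 *
 *              C = 16 * (16 + 112) = 2048
 *
 * which is exactly EM_MAX_COMPLEXITY and therefore still accepted by the
 * check in build_perf_domains() below; one more performance state per
 * domain would push C past the limit and disable EAS.
 */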
extern struct cpufreq_governor schedutil_gov;
static bool build_perf_domains(const struct cpumask *cpu_map)
{
	int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map);
	struct perf_domain *pd = NULL, *tmp;
	int cpu = cpumask_first(cpu_map);
	struct root_domain *rd = cpu_rq(cpu)->rd;
	struct cpufreq_policy *policy;
	struct cpufreq_governor *gov;

	if (!sysctl_sched_energy_aware)
		goto free;

	/* EAS is enabled for asymmetric CPU capacity topologies. */
	if (!per_cpu(sd_asym_cpucapacity, cpu)) {
		if (sched_debug()) {
			pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n",
					cpumask_pr_args(cpu_map));
		}
		goto free;
	}

	/* EAS definitely does *not* handle SMT */
	if (sched_smt_active()) {
		pr_warn("rd %*pbl: Disabling EAS, SMT is not supported\n",
			cpumask_pr_args(cpu_map));
		goto free;
	}

	for_each_cpu(i, cpu_map) {
		/* Skip already covered CPUs. */
		if (find_pd(pd, i))
			continue;

		/* Do not attempt EAS if schedutil is not being used. */
		policy = cpufreq_cpu_get(i);
		if (!policy)
			goto free;
		gov = policy->governor;
		cpufreq_cpu_put(policy);
		if (gov != &schedutil_gov) {
			if (rd->pd)
				pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n",
						cpumask_pr_args(cpu_map));
			goto free;
		}

		/* Create the new pd and add it to the local list. */
		tmp = pd_init(i);
		if (!tmp)
			goto free;
		tmp->next = pd;
		pd = tmp;

		/*
		 * Count performance domains and performance states for the
		 * complexity check.
		 */
		nr_pd++;
		nr_ps += em_pd_nr_perf_states(pd->em_pd);
	}

	/* Bail out if the Energy Model complexity is too high. */
	if (nr_pd * (nr_ps + nr_cpus) > EM_MAX_COMPLEXITY) {
		WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n",
						cpumask_pr_args(cpu_map));
		goto free;
	}

	perf_domain_debug(cpu_map, pd);

	/* Attach the new list of performance domains to the root domain. */
	tmp = rd->pd;
	rcu_assign_pointer(rd->pd, pd);
	if (tmp)
		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);

	return !!pd;

free:
	free_pd(pd);
	tmp = rd->pd;
	rcu_assign_pointer(rd->pd, NULL);
	if (tmp)
		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);

	return false;
}
#else
static void free_pd(struct perf_domain *pd) { }
#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	cpudl_cleanup(&rd->cpudl);
	free_cpumask_var(rd->dlo_mask);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	kfree(rd);
}

void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		call_rcu(&old_rd->rcu, free_rootdomain);
}

void sched_get_rd(struct root_domain *rd)
{
	atomic_inc(&rd->refcount);
}

void sched_put_rd(struct root_domain *rd)
{
	if (!atomic_dec_and_test(&rd->refcount))
		return;

	call_rcu(&rd->rcu, free_rootdomain);
}
static int init_rootdomain(struct root_domain *rd)
{
	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

#ifdef HAVE_RT_PUSH_IPI
	rd->rto_cpu = -1;
	raw_spin_lock_init(&rd->rto_lock);
	init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
#endif

	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;
	return 0;

free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

/*
 * By default the system creates a single root-domain with all CPUs as
 * members (mimicking the global state we have today).
 */
struct root_domain def_root_domain;

void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}
static void free_sched_groups(struct sched_group *sg, int free_sgc)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		tmp = sg->next;

		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
			kfree(sg->sgc);

		if (atomic_dec_and_test(&sg->ref))
			kfree(sg);
		sg = tmp;
	} while (sg != first);
}

static void destroy_sched_domain(struct sched_domain *sd)
{
	/*
	 * A normal sched domain may have multiple group references; an
	 * overlapping domain, having private groups, has only one. Iterate,
	 * dropping group/capacity references, freeing where none remain.
	 */
	free_sched_groups(sd->groups, 1);

	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
		kfree(sd->shared);
	kfree(sd);
}

static void destroy_sched_domains_rcu(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	while (sd) {
		struct sched_domain *parent = sd->parent;
		destroy_sched_domain(sd);
		sd = parent;
	}
}

static void destroy_sched_domains(struct sched_domain *sd)
{
	if (sd)
		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
}
/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
 * allows us to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first CPU number in
 * the cpumask of the domain), this allows us to quickly tell if
 * two CPUs are in the same cache domain, see cpus_share_cache().
 */
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
static void update_top_cache_domain(int cpu)
{
	struct sched_domain_shared *sds = NULL;
	struct sched_domain *sd;
	int id = cpu;
	int size = 1;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd) {
		id = cpumask_first(sched_domain_span(sd));
		size = cpumask_weight(sched_domain_span(sd));
		sds = sd->shared;
	}

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_size, cpu) = size;
	per_cpu(sd_llc_id, cpu) = id;
	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);

	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY);
	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
}
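/*
 * For illustration: the per-CPU sd_llc_id set up above is what makes the
 * cache-sharing test cheap. A minimal sketch of such a test, along the
 * lines of cpus_share_cache() in core.c (assumed, not part of this file):
 *
 *	bool cpus_share_cache(int this_cpu, int that_cpu)
 *	{
 *		return per_cpu(sd_llc_id, this_cpu) ==
 *		       per_cpu(sd_llc_id, that_cpu);
 *	}
 */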
/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
			if (parent->flags & SD_PREFER_SIBLING)
				tmp->flags |= SD_PREFER_SIBLING;
			destroy_sched_domain(parent);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp);
		if (sd)
			sd->child = NULL;
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	dirty_sched_domain_sysctl(cpu);
	destroy_sched_domains(tmp);

	update_top_cache_domain(cpu);
}
struct s_data {
	struct sched_domain * __percpu *sd;
	struct root_domain	*rd;
};

enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

/*
 * Return the canonical balance CPU for this group; this is the first CPU
 * of this group that's also in the balance mask.
 *
 * The balance mask is all those CPUs that could actually end up at this
 * group. See build_balance_mask().
 *
 * Also see should_we_balance().
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first(group_balance_mask(sg));
}
/*
 * NUMA topology (first read the regular topology blurb below)
 *
 * Given a node-distance table, for example:
 *
 *   node   0   1   2   3
 *     0:  10  20  30  20
 *     1:  20  10  20  30
 *     2:  30  20  10  20
 *     3:  20  30  20  10
 *
 * which represents a 4 node ring topology like:
 *
 *   0 ----- 1
 *   |       |
 *   |       |
 *   |       |
 *   3 ----- 2
 *
 * We want to construct domains and groups to represent this. The way we go
 * about doing this is to build the domains on 'hops'. For each NUMA level we
 * construct the mask of all nodes reachable in @level hops.
 *
 * For the above NUMA topology that gives 3 levels:
 *
 * NUMA-2	0-3		0-3		0-3		0-3
 *  groups:	{0-1,3},{1-3}	{0-2},{0,2-3}	{1-3},{0-1,3}	{0,2-3},{0-2}
 *
 * NUMA-1	0-1,3		0-2		1-3		0,2-3
 *  groups:	{0},{1},{3}	{0},{1},{2}	{1},{2},{3}	{0},{2},{3}
 *
 * NUMA-0	0		1		2		3
 *
 * As can be seen, things don't nicely line up as with the regular topology.
 * When we iterate a domain in child domain chunks some nodes can be
 * represented multiple times -- hence the "overlap" naming for this part of
 * the topology.
 *
 * In order to minimize this overlap, we only build enough groups to cover the
 * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
 *
 * Because:
 *
 *  - the first group of each domain is its child domain; this
 *    gets us the first 0-1,3
 *  - the only uncovered node is 2, whose child domain is 1-3.
 *
 * However, because of the overlap, computing a unique CPU for each group is
 * more complicated. Consider for instance the groups of NODE-1 NUMA-2, both
 * groups include the CPUs of Node-0, while those CPUs would not in fact ever
 * end up at those groups (they would end up in group: 0-1,3).
 *
 * To correct this we have to introduce the group balance mask. This mask
 * will contain those CPUs in the group that can reach this group given the
 * (child) domain tree.
 *
 * With this we can once again compute balance_cpu and sched_group_capacity
 * relations.
 *
 * XXX include words on how balance_cpu is unique and therefore can be
 * used for sched_group_capacity links.
 *
 *
 * Another 'interesting' topology is:
 *
 *   node   0   1   2   3
 *     0:  10  20  20  30
 *     1:  20  10  20  20
 *     2:  20  20  10  20
 *     3:  30  20  20  10
 *
 * Which looks a little like:
 *
 *   0 ----- 1
 *   |     / |
 *   |   /   |
 *   | /     |
 *   2 ----- 3
 *
 * This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3
 * are not.
 *
 * This leads to a few particularly weird cases where the sched_domains are
 * not of the same number for each CPU. Consider:
 *
 * NUMA-2	0-3						0-3
 *  groups:	{0-2},{1-3}					{1-3},{0-2}
 *
 * NUMA-1	0-2		0-3	0-3		1-3
 *
 * NUMA-0	0		1	2		3
 *
 */
/*
 * Build the balance mask; it contains only those CPUs that can arrive at this
 * group and should be considered to continue balancing.
 *
 * We do this during the group creation pass, therefore the group information
 * isn't complete yet, however since each group represents a (child) domain we
 * can fully construct this using the sched_domain bits (which are already
 * complete).
 */
static void
build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
{
	const struct cpumask *sg_span = sched_group_span(sg);
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(mask);

	for_each_cpu(i, sg_span) {
		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Can happen in the asymmetric case, where these siblings are
		 * unused. The mask will not be empty because those CPUs that
		 * do have the top domain _should_ span the domain.
		 */
		if (!sibling->child)
			continue;

		/* If we would not end up here, we can't continue from here */
		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
			continue;

		cpumask_set_cpu(i, mask);
	}

	/* We must not have empty masks here */
	WARN_ON_ONCE(cpumask_empty(mask));
}
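/*
 * Worked example (added for illustration), using the 4 node ring topology
 * from the NUMA blurb above: for the {0-2} group of Node-1's NUMA-2 domain,
 * only the CPUs of node 1 have a child domain spanning exactly 0-2, so only
 * those pass the sibling-child-span test above and make it into the balance
 * mask; the node 0 and node 2 CPUs in the group do not.
 */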
/*
 * XXX: This creates per-node group entries; since the load-balancer will
 * immediately access remote memory to construct this group's load-balance
 * statistics, having the groups node-local is of dubious benefit.
 */
static struct sched_group *
build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
{
	struct sched_group *sg;
	struct cpumask *sg_span;

	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
			GFP_KERNEL, cpu_to_node(cpu));

	if (!sg)
		return NULL;

	sg_span = sched_group_span(sg);
	if (sd->child)
		cpumask_copy(sg_span, sched_domain_span(sd->child));
	else
		cpumask_copy(sg_span, sched_domain_span(sd));

	atomic_inc(&sg->ref);
	return sg;
}
static void init_overlap_sched_group(struct sched_domain *sd,
				     struct sched_group *sg)
{
	struct cpumask *mask = sched_domains_tmpmask2;
	struct sd_data *sdd = sd->private;
	struct cpumask *sg_span;
	int cpu;

	build_balance_mask(sd, sg, mask);
	cpu = cpumask_first_and(sched_group_span(sg), mask);

	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
	if (atomic_inc_return(&sg->sgc->ref) == 1)
		cpumask_copy(group_balance_mask(sg), mask);
	else
		WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));

	/*
	 * Initialize sgc->capacity such that even if we mess up the
	 * domains and no possible iteration will get us here, we won't
	 * die on a /0 trap.
	 */
	sg_span = sched_group_span(sg);
	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
}
static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(covered);

	for_each_cpu_wrap(i, span, cpu) {
		struct cpumask *sg_span;

		if (cpumask_test_cpu(i, covered))
			continue;

		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Asymmetric node setups can result in situations where the
		 * domain tree is of unequal depth, make sure to skip domains
		 * that already cover the entire range.
		 *
		 * In that case build_sched_domains() will have terminated the
		 * iteration early and our sibling sd spans will be empty.
		 * Domains should always include the CPU they're built on, so
		 * detect it.
		 */
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		sg = build_group_from_child_sched_domain(sibling, cpu);
		if (!sg)
			goto fail;

		sg_span = sched_group_span(sg);
		cpumask_or(covered, covered, sg_span);

		init_overlap_sched_group(sd, sg);

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = first;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}
/*
 * Package topology (also see the load-balance blurb in fair.c)
 *
 * The scheduler builds a tree structure to represent a number of important
 * topology features. By default (default_topology[]) these include:
 *
 *  - Simultaneous multithreading (SMT)
 *  - Multi-Core Cache (MC)
 *  - Package (DIE)
 *
 * Where the last one more or less denotes everything up to a NUMA node.
 *
 * The tree consists of 3 primary data structures:
 *
 *	sched_domain -> sched_group -> sched_group_capacity
 *	    ^ ^             ^ ^
 *          `-'             `-'
 *
 * The sched_domains are per-CPU and have a two way link (parent & child) and
 * denote the ever growing mask of CPUs belonging to that level of topology.
 *
 * Each sched_domain has a circular (double) linked list of sched_group's, each
 * denoting the domains of the level below (or individual CPUs in case of the
 * first domain level). The sched_group linked by a sched_domain includes the
 * CPU of that sched_domain [*].
 *
 * Take for instance a 2 threaded, 2 core, 2 cache cluster part:
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * DIE  [                             ]
 * MC   [             ] [             ]
 * SMT  [     ] [     ] [     ] [     ]
 *
 *  - or -
 *
 * DIE  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
 * MC   0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
 * SMT  0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * One way to think about it is: sched_domain moves you up and down among these
 * topology levels, while sched_group moves you sideways through it, at child
 * domain granularity.
 *
 * sched_group_capacity ensures each unique sched_group has shared storage.
 *
 * There are two related construction problems, both of which require a CPU
 * that uniquely identifies each group (for a given domain):
 *
 *  - The first is the balance_cpu (see should_we_balance() and the
 *    load-balance blurb in fair.c); for each group we only want 1 CPU to
 *    continue balancing at a higher domain.
 *
 *  - The second is the sched_group_capacity; we want all identical groups
 *    to share a single sched_group_capacity.
 *
 * These topologies are exclusive by construction: it is impossible for an
 * SMT thread to belong to multiple cores, and for cores to be part of
 * multiple caches. There is a very clear and unique location for each CPU
 * in the hierarchy.
 *
 * Therefore computing a unique CPU for each group is trivial (the iteration
 * mask is redundant and set all 1s; all CPUs in a group will end up at _that_
 * group), we can simply pick the first CPU in each group.
 *
 *
 * [*] in other words, the first group of each domain is its child domain.
 */
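/*
 * For illustration, the canonical way to walk the circular group list of a
 * domain (the same idiom sched_domain_debug_one() and
 * init_sched_groups_capacity() use) is:
 *
 *	struct sched_group *sg = sd->groups;
 *
 *	do {
 *		...
 *		sg = sg->next;
 *	} while (sg != sd->groups);
 */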
static struct sched_group *get_group(int cpu, struct sd_data *sdd)
{
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	struct sched_domain *child = sd->child;
	struct sched_group *sg;
	bool already_visited;

	if (child)
		cpu = cpumask_first(sched_domain_span(child));

	sg = *per_cpu_ptr(sdd->sg, cpu);
	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);

	/* Increase refcounts for claim_allocations: */
	already_visited = atomic_inc_return(&sg->ref) > 1;
	/* sgc visits should follow a similar trend as sg */
	WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));

	/* If we have already visited that group, it's already initialized. */
	if (already_visited)
		return sg;

	if (child) {
		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
	} else {
		cpumask_set_cpu(cpu, sched_group_span(sg));
		cpumask_set_cpu(cpu, group_balance_mask(sg));
	}

	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;

	return sg;
}
/*
 * build_sched_groups will build a circular linked list of the groups
 * covered by the given span, will set each group's ->cpumask correctly,
 * and will initialize their ->sgc.
 *
 * Assumes the sched_domain tree is fully constructed.
 */
static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL;
	struct sd_data *sdd = sd->private;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered;
	int i;

	lockdep_assert_held(&sched_domains_mutex);
	covered = sched_domains_tmpmask;

	cpumask_clear(covered);

	for_each_cpu_wrap(i, span, cpu) {
		struct sched_group *sg;

		if (cpumask_test_cpu(i, covered))
			continue;

		sg = get_group(i, sdd);

		cpumask_or(covered, covered, sched_group_span(sg));

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	last->next = first;
	sd->groups = first;

	return 0;
}
/*
 * Initialize sched groups cpu_capacity.
 *
 * cpu_capacity indicates the capacity of a sched group, which is used while
 * distributing the load between different sched groups in a sched domain.
 * Typically cpu_capacity for all the groups in a sched domain will be the
 * same unless there are asymmetries in the topology. If there are
 * asymmetries, the group having more cpu_capacity will pick up more load
 * compared to the group having less cpu_capacity.
 */
static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;

	WARN_ON(!sg);

	do {
		int cpu, max_cpu = -1;

		sg->group_weight = cpumask_weight(sched_group_span(sg));

		if (!(sd->flags & SD_ASYM_PACKING))
			goto next;

		for_each_cpu(cpu, sched_group_span(sg)) {
			if (max_cpu < 0)
				max_cpu = cpu;
			else if (sched_asym_prefer(cpu, max_cpu))
				max_cpu = cpu;
		}
		sg->asym_prefer_cpu = max_cpu;

next:
		sg = sg->next;
	} while (sg != sd->groups);

	if (cpu != group_balance_cpu(sg))
		return;

	update_group_capacity(sd, cpu);
}
/*
 * Initializers for schedule domains
 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
 */

static int default_relax_domain_level = -1;
int sched_domain_level_max;

static int __init setup_relax_domain_level(char *str)
{
	if (kstrtoint(str, 0, &default_relax_domain_level))
		pr_warn("Unable to set relax_domain_level\n");

	return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);

static void set_domain_attribute(struct sched_domain *sd,
				 struct sched_domain_attr *attr)
{
	int request;

	if (!attr || attr->relax_domain_level < 0) {
		if (default_relax_domain_level < 0)
			return;
		request = default_relax_domain_level;
	} else
		request = attr->relax_domain_level;

	if (sd->level > request) {
		/* Turn off idle balance on this domain: */
		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	}
}
static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);

static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
				 const struct cpumask *cpu_map)
{
	switch (what) {
	case sa_rootdomain:
		if (!atomic_read(&d->rd->refcount))
			free_rootdomain(&d->rd->rcu);
		fallthrough;
	case sa_sd:
		free_percpu(d->sd);
		fallthrough;
	case sa_sd_storage:
		__sdt_free(cpu_map);
		fallthrough;
	case sa_none:
		break;
	}
}

static enum s_alloc
__visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
{
	memset(d, 0, sizeof(*d));

	if (__sdt_alloc(cpu_map))
		return sa_sd_storage;
	d->sd = alloc_percpu(struct sched_domain *);
	if (!d->sd)
		return sa_sd_storage;
	d->rd = alloc_rootdomain();
	if (!d->rd)
		return sa_sd;

	return sa_rootdomain;
}
/*
 * NULL the sd_data elements we've used to build the sched_domain and
 * sched_group structure so that the subsequent __free_domain_allocs()
 * will not free the data we're using.
 */
static void claim_allocations(int cpu, struct sched_domain *sd)
{
	struct sd_data *sdd = sd->private;

	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
	*per_cpu_ptr(sdd->sd, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
		*per_cpu_ptr(sdd->sds, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
		*per_cpu_ptr(sdd->sg, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
}
#ifdef CONFIG_NUMA

enum numa_topology_type sched_numa_topology_type;

static int			sched_domains_numa_levels;
static int			sched_domains_curr_level;

int				sched_max_numa_distance;
static int			*sched_domains_numa_distance;
static struct cpumask		***sched_domains_numa_masks;
int __read_mostly		node_reclaim_distance = RECLAIM_DISTANCE;
#endif
/*
 * SD_flags allowed in topology descriptions.
 *
 * These flags are purely descriptive of the topology and do not prescribe
 * behaviour. Behaviour is artificial and mapped in the below sd_init()
 * function:
 *
 *   SD_SHARE_CPUCAPACITY   - describes SMT topologies
 *   SD_SHARE_PKG_RESOURCES - describes shared caches
 *   SD_NUMA                - describes NUMA topologies
 *
 * Odd one out, which besides describing the topology has a quirk also
 * prescribes the desired behaviour that goes along with it:
 *
 *   SD_ASYM_PACKING        - describes SMT quirks
 */
#define TOPOLOGY_SD_FLAGS		\
	(SD_SHARE_CPUCAPACITY	|	\
	 SD_SHARE_PKG_RESOURCES |	\
	 SD_NUMA		|	\
	 SD_ASYM_PACKING)
static struct sched_domain *
sd_init(struct sched_domain_topology_level *tl,
	const struct cpumask *cpu_map,
	struct sched_domain *child, int dflags, int cpu)
{
	struct sd_data *sdd = &tl->data;
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	int sd_id, sd_weight, sd_flags = 0;

#ifdef CONFIG_NUMA
	/*
	 * Ugly hack to pass state to sd_numa_mask()...
	 */
	sched_domains_curr_level = tl->numa_level;
#endif

	sd_weight = cpumask_weight(tl->mask(cpu));

	if (tl->sd_flags)
		sd_flags = (*tl->sd_flags)();
	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
			"wrong sd_flags in topology description\n"))
		sd_flags &= TOPOLOGY_SD_FLAGS;

	/* Apply detected topology flags */
	sd_flags |= dflags;

	*sd = (struct sched_domain){
		.min_interval		= sd_weight,
		.max_interval		= 2*sd_weight,
		.busy_factor		= 32,
		.imbalance_pct		= 125,

		.cache_nice_tries	= 0,

		.flags			= 1*SD_BALANCE_NEWIDLE
					| 1*SD_BALANCE_EXEC
					| 1*SD_BALANCE_FORK
					| 0*SD_BALANCE_WAKE
					| 1*SD_WAKE_AFFINE
					| 0*SD_SHARE_CPUCAPACITY
					| 0*SD_SHARE_PKG_RESOURCES
					| 0*SD_SERIALIZE
					| 1*SD_PREFER_SIBLING
					| 0*SD_NUMA
					| sd_flags
					,

		.last_balance		= jiffies,
		.balance_interval	= sd_weight,
		.max_newidle_lb_cost	= 0,
		.next_decay_max_lb_cost	= jiffies,
		.child			= child,
#ifdef CONFIG_SCHED_DEBUG
		.name			= tl->name,
#endif
	};

	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
	sd_id = cpumask_first(sched_domain_span(sd));

	/*
	 * Convert topological properties into behaviour.
	 */

	/* Don't attempt to spread across CPUs of different capacities. */
	if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child)
		sd->child->flags &= ~SD_PREFER_SIBLING;

	if (sd->flags & SD_SHARE_CPUCAPACITY) {
		sd->imbalance_pct = 110;

	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
		sd->imbalance_pct = 117;
		sd->cache_nice_tries = 1;

#ifdef CONFIG_NUMA
	} else if (sd->flags & SD_NUMA) {
		sd->cache_nice_tries = 2;

		sd->flags &= ~SD_PREFER_SIBLING;
		sd->flags |= SD_SERIALIZE;
		if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) {
			sd->flags &= ~(SD_BALANCE_EXEC |
				       SD_BALANCE_FORK |
				       SD_WAKE_AFFINE);
		}

#endif
	} else {
		sd->cache_nice_tries = 1;
	}

	/*
	 * For all levels sharing cache; connect a sched_domain_shared
	 * instance.
	 */
	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
		atomic_inc(&sd->shared->ref);
		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
	}

	sd->private = sdd;

	return sd;
}
/*
 * Topology list, bottom-up.
 */
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static struct sched_domain_topology_level *sched_domain_topology =
	default_topology;

#define for_each_sd_topology(tl)			\
	for (tl = sched_domain_topology; tl->mask; tl++)

void set_sched_topology(struct sched_domain_topology_level *tl)
{
	if (WARN_ON_ONCE(sched_smp_initialized))
		return;

	sched_domain_topology = tl;
}
#ifdef CONFIG_NUMA

static const struct cpumask *sd_numa_mask(int cpu)
{
	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
}

static void sched_numa_warn(const char *str)
{
	static int done = false;
	int i,j;

	if (done)
		return;

	done = true;

	printk(KERN_WARNING "ERROR: %s\n\n", str);

	for (i = 0; i < nr_node_ids; i++) {
		printk(KERN_WARNING "  ");
		for (j = 0; j < nr_node_ids; j++)
			printk(KERN_CONT "%02d ", node_distance(i,j));
		printk(KERN_CONT "\n");
	}
	printk(KERN_WARNING "\n");
}

bool find_numa_distance(int distance)
{
	int i;

	if (distance == node_distance(0, 0))
		return true;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		if (sched_domains_numa_distance[i] == distance)
			return true;
	}

	return false;
}
/*
 * A system can have three types of NUMA topology:
 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
 *
 * The difference between a glueless mesh topology and a backplane
 * topology lies in whether communication between not directly
 * connected nodes goes through intermediary nodes (where programs
 * could run), or through backplane controllers. This affects
 * placement of programs.
 *
 * The type of topology can be discerned with the following tests:
 * - If the maximum distance between any nodes is 1 hop, the system
 *   is directly connected.
 * - If for two nodes A and B, located N > 1 hops away from each other,
 *   there is an intermediary node C, which is < N hops away from both
 *   nodes A and B, the system is a glueless mesh.
 */
static void init_numa_topology_type(void)
{
	int a, b, c, n;

	n = sched_max_numa_distance;

	if (sched_domains_numa_levels <= 2) {
		sched_numa_topology_type = NUMA_DIRECT;
		return;
	}

	for_each_online_node(a) {
		for_each_online_node(b) {
			/* Find two nodes furthest removed from each other. */
			if (node_distance(a, b) < n)
				continue;

			/* Is there an intermediary node between a and b? */
			for_each_online_node(c) {
				if (node_distance(a, c) < n &&
				    node_distance(b, c) < n) {
					sched_numa_topology_type =
							NUMA_GLUELESS_MESH;
					return;
				}
			}

			sched_numa_topology_type = NUMA_BACKPLANE;
			return;
		}
	}
}
void sched_init_numa(void)
{
	int next_distance, curr_distance = node_distance(0, 0);
	struct sched_domain_topology_level *tl;
	int level = 0;
	int i, j, k;

	sched_domains_numa_distance = kzalloc(sizeof(int) * (nr_node_ids + 1), GFP_KERNEL);
	if (!sched_domains_numa_distance)
		return;

	/* Includes NUMA identity node at level 0. */
	sched_domains_numa_distance[level++] = curr_distance;
	sched_domains_numa_levels = level;

	/*
	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
	 * unique distances in the node_distance() table.
	 *
	 * Assumes node_distance(0,j) includes all distances in
	 * node_distance(i,j) in order to avoid cubic time.
	 */
	next_distance = curr_distance;
	for (i = 0; i < nr_node_ids; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			for (k = 0; k < nr_node_ids; k++) {
				int distance = node_distance(i, k);

				if (distance > curr_distance &&
				    (distance < next_distance ||
				     next_distance == curr_distance))
					next_distance = distance;

				/*
				 * While not a strong assumption it would be nice to know
				 * about cases where if node A is connected to B, B is not
				 * equally connected to A.
				 */
				if (sched_debug() && node_distance(k, i) != distance)
					sched_numa_warn("Node-distance not symmetric");

				if (sched_debug() && i && !find_numa_distance(distance))
					sched_numa_warn("Node-0 not representative");
			}
			if (next_distance != curr_distance) {
				sched_domains_numa_distance[level++] = next_distance;
				sched_domains_numa_levels = level;
				curr_distance = next_distance;
			} else break;
		}

		/*
		 * In case of sched_debug() we verify the above assumption.
		 */
		if (!sched_debug())
			break;
	}

	/*
	 * 'level' contains the number of unique distances
	 *
	 * The sched_domains_numa_distance[] array includes the actual distance
	 * numbers.
	 */

	/*
	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
	 * the array will contain less than 'level' members. This could be
	 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
	 * in other functions.
	 *
	 * We reset it to 'level' at the end of this function.
	 */
	sched_domains_numa_levels = 0;

	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
	if (!sched_domains_numa_masks)
		return;

	/*
	 * Now for each level, construct a mask per node which contains all
	 * CPUs of nodes that are that many hops away from us.
	 */
	for (i = 0; i < level; i++) {
		sched_domains_numa_masks[i] =
			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
		if (!sched_domains_numa_masks[i])
			return;

		for (j = 0; j < nr_node_ids; j++) {
			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
			if (!mask)
				return;

			sched_domains_numa_masks[i][j] = mask;

			for (k = 0; k < nr_node_ids; k++) {
				if (node_distance(j, k) > sched_domains_numa_distance[i])
					continue;

				cpumask_or(mask, mask, cpumask_of_node(k));
			}
		}
	}

	/* Compute default topology size */
	for (i = 0; sched_domain_topology[i].mask; i++);

	tl = kzalloc((i + level + 1) *
			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
	if (!tl)
		return;

	/*
	 * Copy the default topology bits..
	 */
	for (i = 0; sched_domain_topology[i].mask; i++)
		tl[i] = sched_domain_topology[i];

	/*
	 * Add the NUMA identity distance, aka single NODE.
	 */
	tl[i++] = (struct sched_domain_topology_level){
		.mask = sd_numa_mask,
		.numa_level = 0,
		SD_INIT_NAME(NODE)
	};

	/*
	 * .. and append 'j' levels of NUMA goodness.
	 */
	for (j = 1; j < level; i++, j++) {
		tl[i] = (struct sched_domain_topology_level){
			.mask = sd_numa_mask,
			.sd_flags = cpu_numa_flags,
			.flags = SDTL_OVERLAP,
			.numa_level = j,
			SD_INIT_NAME(NUMA)
		};
	}

	sched_domain_topology = tl;

	sched_domains_numa_levels = level;
	sched_max_numa_distance = sched_domains_numa_distance[level - 1];

	init_numa_topology_type();
}
void sched_domains_numa_masks_set(unsigned int cpu)
{
	int node = cpu_to_node(cpu);
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			if (node_distance(j, node) <= sched_domains_numa_distance[i])
				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
		}
	}
}

void sched_domains_numa_masks_clear(unsigned int cpu)
{
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++)
			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
	}
}

/*
 * sched_numa_find_closest() - given the NUMA topology, find the cpu
 *                             closest to @cpu from @cpumask.
 * cpumask: cpumask to find a cpu from
 * cpu: cpu to be close to
 *
 * returns: cpu, or nr_cpu_ids when nothing found.
 */
int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
{
	int i, j = cpu_to_node(cpu);

	for (i = 0; i < sched_domains_numa_levels; i++) {
		cpu = cpumask_any_and(cpus, sched_domains_numa_masks[i][j]);
		if (cpu < nr_cpu_ids)
			return cpu;
	}
	return nr_cpu_ids;
}

#endif /* CONFIG_NUMA */
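/*
 * Illustrative (hypothetical) use of sched_numa_find_closest() above: a
 * driver wanting a housekeeping CPU topologically near a device's local
 * CPU could do something like:
 *
 *	int target = sched_numa_find_closest(hk_mask, dev_cpu);
 *
 *	if (target >= nr_cpu_ids)
 *		target = dev_cpu;	(nothing closer found, fall back)
 *
 * The loop in the helper widens the search one NUMA hop at a time, so the
 * first hit is the closest match.
 */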
static int __sdt_alloc(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		sdd->sd = alloc_percpu(struct sched_domain *);
		if (!sdd->sd)
			return -ENOMEM;

		sdd->sds = alloc_percpu(struct sched_domain_shared *);
		if (!sdd->sds)
			return -ENOMEM;

		sdd->sg = alloc_percpu(struct sched_group *);
		if (!sdd->sg)
			return -ENOMEM;

		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
		if (!sdd->sgc)
			return -ENOMEM;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;
			struct sched_domain_shared *sds;
			struct sched_group *sg;
			struct sched_group_capacity *sgc;

			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sd)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sd, j) = sd;

			sds = kzalloc_node(sizeof(struct sched_domain_shared),
					GFP_KERNEL, cpu_to_node(j));
			if (!sds)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sds, j) = sds;

			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sg)
				return -ENOMEM;

			sg->next = sg;

			*per_cpu_ptr(sdd->sg, j) = sg;

			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sgc)
				return -ENOMEM;

#ifdef CONFIG_SCHED_DEBUG
			sgc->id = j;
#endif

			*per_cpu_ptr(sdd->sgc, j) = sgc;
		}
	}

	return 0;
}
static void __sdt_free(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;

			if (sdd->sd) {
				sd = *per_cpu_ptr(sdd->sd, j);
				if (sd && (sd->flags & SD_OVERLAP))
					free_sched_groups(sd->groups, 0);
				kfree(*per_cpu_ptr(sdd->sd, j));
			}

			if (sdd->sds)
				kfree(*per_cpu_ptr(sdd->sds, j));
			if (sdd->sg)
				kfree(*per_cpu_ptr(sdd->sg, j));
			if (sdd->sgc)
				kfree(*per_cpu_ptr(sdd->sgc, j));
		}
		free_percpu(sdd->sd);
		sdd->sd = NULL;
		free_percpu(sdd->sds);
		sdd->sds = NULL;
		free_percpu(sdd->sg);
		sdd->sg = NULL;
		free_percpu(sdd->sgc);
		sdd->sgc = NULL;
	}
}
static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
		struct sched_domain *child, int dflags, int cpu)
{
	struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu);

	if (child) {
		sd->level = child->level + 1;
		sched_domain_level_max = max(sched_domain_level_max, sd->level);
		child->parent = sd;

		if (!cpumask_subset(sched_domain_span(child),
				    sched_domain_span(sd))) {
			pr_err("BUG: arch topology borken\n");
#ifdef CONFIG_SCHED_DEBUG
			pr_err("     the %s domain not a subset of the %s domain\n",
					child->name, sd->name);
#endif
			/* Fixup, ensure @sd has at least @child CPUs. */
			cpumask_or(sched_domain_span(sd),
				   sched_domain_span(sd),
				   sched_domain_span(child));
		}

	}
	set_domain_attribute(sd, attr);

	return sd;
}
/*
 * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for
 * any two given CPUs at this (non-NUMA) topology level.
 */
static bool topology_span_sane(struct sched_domain_topology_level *tl,
			      const struct cpumask *cpu_map, int cpu)
{
	int i;

	/* NUMA levels are allowed to overlap */
	if (tl->flags & SDTL_OVERLAP)
		return true;

	/*
	 * Non-NUMA levels cannot partially overlap - they must be either
	 * completely equal or completely disjoint. Otherwise we can end up
	 * breaking the sched_group lists - i.e. a later get_group() pass
	 * breaks the linking done for an earlier span.
	 */
	for_each_cpu(i, cpu_map) {
		if (i == cpu)
			continue;
		/*
		 * We should 'and' all those masks with 'cpu_map' to exactly
		 * match the topology we're about to build, but that can only
		 * remove CPUs, which only lessens our ability to detect
		 * overlaps.
		 */
		if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) &&
		    cpumask_intersects(tl->mask(cpu), tl->mask(i)))
			return false;
	}

	return true;
}
/*
 * Find the sched_domain_topology_level where all CPU capacities are visible
 * for all CPUs.
 */
static struct sched_domain_topology_level
*asym_cpu_capacity_level(const struct cpumask *cpu_map)
{
	int i, j, asym_level = 0;
	bool asym = false;
	struct sched_domain_topology_level *tl, *asym_tl = NULL;
	unsigned long cap;

	/* Is there any asymmetry? */
	cap = arch_scale_cpu_capacity(cpumask_first(cpu_map));

	for_each_cpu(i, cpu_map) {
		if (arch_scale_cpu_capacity(i) != cap) {
			asym = true;
			break;
		}
	}

	if (!asym)
		return NULL;

	/*
	 * Examine topology from all CPU's point of views to detect the lowest
	 * sched_domain_topology_level where a highest capacity CPU is visible
	 * to everyone.
	 */
	for_each_cpu(i, cpu_map) {
		unsigned long max_capacity = arch_scale_cpu_capacity(i);
		int tl_id = 0;

		for_each_sd_topology(tl) {
			if (tl_id < asym_level)
				goto next_level;

			for_each_cpu_and(j, tl->mask(i), cpu_map) {
				unsigned long capacity;

				capacity = arch_scale_cpu_capacity(j);

				if (capacity <= max_capacity)
					continue;

				max_capacity = capacity;
				asym_level = tl_id;
				asym_tl = tl;
			}
next_level:
			tl_id++;
		}
	}

	return asym_tl;
}
/*
 * Build sched domains for a given set of CPUs and attach the sched domains
 * to the individual CPUs.
 */
static int
build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
{
	enum s_alloc alloc_state = sa_none;
	struct sched_domain *sd;
	struct s_data d;
	struct rq *rq = NULL;
	int i, ret = -ENOMEM;
	struct sched_domain_topology_level *tl_asym;
	bool has_asym = false;

	if (WARN_ON(cpumask_empty(cpu_map)))
		goto error;

	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
	if (alloc_state != sa_rootdomain)
		goto error;

	tl_asym = asym_cpu_capacity_level(cpu_map);

	/* Set up domains for CPUs specified by the cpu_map: */
	for_each_cpu(i, cpu_map) {
		struct sched_domain_topology_level *tl;

		sd = NULL;
		for_each_sd_topology(tl) {
			int dflags = 0;

			if (tl == tl_asym) {
				dflags |= SD_ASYM_CPUCAPACITY;
				has_asym = true;
			}

			if (WARN_ON(!topology_span_sane(tl, cpu_map, i)))
				goto error;

			sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i);

			if (tl == sched_domain_topology)
				*per_cpu_ptr(d.sd, i) = sd;
			if (tl->flags & SDTL_OVERLAP)
				sd->flags |= SD_OVERLAP;
			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
				break;
		}
	}

	/* Build the groups for the domains */
	for_each_cpu(i, cpu_map) {
		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			sd->span_weight = cpumask_weight(sched_domain_span(sd));
			if (sd->flags & SD_OVERLAP) {
				if (build_overlap_sched_groups(sd, i))
					goto error;
			} else {
				if (build_sched_groups(sd, i))
					goto error;
			}
		}
	}

	/* Calculate CPU capacity for physical packages and nodes */
	for (i = nr_cpumask_bits-1; i >= 0; i--) {
		if (!cpumask_test_cpu(i, cpu_map))
			continue;

		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			claim_allocations(i, sd);
			init_sched_groups_capacity(i, sd);
		}
	}

	/* Attach the domains */
	rcu_read_lock();
	for_each_cpu(i, cpu_map) {
		rq = cpu_rq(i);
		sd = *per_cpu_ptr(d.sd, i);

		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);

		cpu_attach_domain(sd, d.rd, i);
	}
	rcu_read_unlock();

	if (has_asym)
		static_branch_inc_cpuslocked(&sched_asym_cpucapacity);

	if (rq && sched_debug_enabled) {
		pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
	}

	ret = 0;
error:
	__free_domain_allocs(&d, alloc_state, cpu_map);

	return ret;
}
/* Current sched domains: */
static cpumask_var_t			*doms_cur;

/* Number of sched domains in 'doms_cur': */
static int				ndoms_cur;

/* Attributes of custom domains in 'doms_cur' */
static struct sched_domain_attr		*dattr_cur;

/*
 * Special case: If a kmalloc() of a doms_cur partition (array of
 * cpumask) fails, then fall back to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
static cpumask_var_t			fallback_doms;

/*
 * arch_update_cpu_topology lets virtualized architectures update the
 * CPU core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
int __weak arch_update_cpu_topology(void)
{
	return 0;
}
cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
	int i;
	cpumask_var_t *doms;

	doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
	if (!doms)
		return NULL;
	for (i = 0; i < ndoms; i++) {
		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
			free_sched_domains(doms, i);
			return NULL;
		}
	}
	return doms;
}

void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
	unsigned int i;

	for (i = 0; i < ndoms; i++)
		free_cpumask_var(doms[i]);
	kfree(doms);
}
/*
 * Set up scheduler domains and groups. For now this just excludes isolated
 * CPUs, but could be used to exclude other special cases in the future.
 */
int sched_init_domains(const struct cpumask *cpu_map)
{
	int err;

	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
	zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);

	arch_update_cpu_topology();
	ndoms_cur = 1;
	doms_cur = alloc_sched_domains(ndoms_cur);
	if (!doms_cur)
		doms_cur = &fallback_doms;
	cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_FLAG_DOMAIN));
	err = build_sched_domains(doms_cur[0], NULL);
	register_sched_domain_sysctl();

	return err;
}
/*
 * Detach sched domains from a group of CPUs specified in cpu_map.
 * These CPUs will now be attached to the NULL domain.
 */
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
	unsigned int cpu = cpumask_any(cpu_map);
	int i;

	if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
		static_branch_dec_cpuslocked(&sched_asym_cpucapacity);

	rcu_read_lock();
	for_each_cpu(i, cpu_map)
		cpu_attach_domain(NULL, &def_root_domain, i);
	rcu_read_unlock();
}

/* Handle null as "default" */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
			struct sched_domain_attr *new, int idx_new)
{
	struct sched_domain_attr tmp;

	/* Fast path: */
	if (!new && !cur)
		return 1;

	tmp = SD_ATTR_INIT;

	return !memcmp(cur ? (cur + idx_cur) : &tmp,
			new ? (new + idx_new) : &tmp,
			sizeof(struct sched_domain_attr));
}
/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap.) We should set up one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
 * current 'doms_cur' domains and in the new 'doms_new', we can leave
 * it as it is.
 *
 * The passed in 'doms_new' should be allocated using
 * alloc_sched_domains. This routine takes ownership of it and will
 * free_sched_domains it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fall back to the single partition
 * 'fallback_doms'; it also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
 * and it will not create the default domain.
 *
 * Call with hotplug lock and sched_domains_mutex held.
 */
void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new)
{
	bool __maybe_unused has_eas = false;
	int i, j, n;
	int new_topology;

	lockdep_assert_held(&sched_domains_mutex);

	/* Always unregister in case we don't destroy any domains: */
	unregister_sched_domain_sysctl();

	/* Let the architecture update CPU core mappings: */
	new_topology = arch_update_cpu_topology();

	if (!doms_new) {
		WARN_ON_ONCE(dattr_new);
		n = 0;
		doms_new = alloc_sched_domains(1);
		if (doms_new) {
			n = 1;
			cpumask_and(doms_new[0], cpu_active_mask,
				    housekeeping_cpumask(HK_FLAG_DOMAIN));
		}
	} else {
		n = ndoms_new;
	}

	/* Destroy deleted domains: */
	for (i = 0; i < ndoms_cur; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_cur[i], doms_new[j]) &&
			    dattrs_equal(dattr_cur, i, dattr_new, j)) {
				struct root_domain *rd;

				/*
				 * This domain won't be destroyed and as such
				 * its dl_bw->total_bw needs to be cleared. It
				 * will be recomputed in function
				 * update_tasks_root_domain().
				 */
				rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;
				dl_clear_root_domain(rd);
				goto match1;
			}
		}
		/* No match - a current sched domain not in new doms_new[] */
		detach_destroy_domains(doms_cur[i]);
match1:
		;
	}

	n = ndoms_cur;
	if (!doms_new) {
		n = 0;
		doms_new = &fallback_doms;
		cpumask_and(doms_new[0], cpu_active_mask,
			    housekeeping_cpumask(HK_FLAG_DOMAIN));
	}

	/* Build new domains: */
	for (i = 0; i < ndoms_new; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
			    dattrs_equal(dattr_new, i, dattr_cur, j))
				goto match2;
		}
		/* No match - add a new doms_new */
		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
match2:
		;
	}

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
	/* Build perf. domains: */
	for (i = 0; i < ndoms_new; i++) {
		for (j = 0; j < n && !sched_energy_update; j++) {
			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
				has_eas = true;
				goto match3;
			}
		}
		/* No match - add perf. domains for a new rd */
		has_eas |= build_perf_domains(doms_new[i]);
match3:
		;
	}
	sched_energy_set(has_eas);
#endif

	/* Remember the new sched domains: */
	if (doms_cur != &fallback_doms)
		free_sched_domains(doms_cur, ndoms_cur);

	kfree(dattr_cur);
	doms_cur = doms_new;
	dattr_cur = dattr_new;
	ndoms_cur = ndoms_new;

	register_sched_domain_sysctl();
}
/*
 * Call with hotplug lock held.
 */
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			     struct sched_domain_attr *dattr_new)
{
	mutex_lock(&sched_domains_mutex);
	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
	mutex_unlock(&sched_domains_mutex);
}