// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/node.h>
#include <asm/sysinfo.h>

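/*
 * Function codes for the perform-topology-function (PTF) instruction:
 * request horizontal or vertical polarization, or check whether a
 * topology change report is pending.
 */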
#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

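/* Topology modes, selected via the "topology=" early parameter or sysctl. */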
enum {
	TOPOLOGY_MODE_HW,
	TOPOLOGY_MODE_SINGLE,
	TOPOLOGY_MODE_PACKAGE,
	TOPOLOGY_MODE_UNINITIALIZED
};

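/*
 * One mask_info list exists per nesting level (socket/book/drawer); each
 * node holds a container id and the mask of cpus contained in it.
 */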
struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;

static DECLARE_WORK(topology_work, topology_work_fn);

/*
 * Socket/Book linked lists and cpu_topology updates are
 * protected by "sched_domains_mutex".
 */
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;

struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

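/*
 * Copy the mask of the topology container (socket/book/drawer) that
 * contains @cpu to @dst, restricted to online cpus. Falls back to the
 * cpu itself if no container covers it.
 */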
static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int cpu)
{
	static cpumask_t mask;

	cpumask_copy(&mask, cpumask_of(cpu));
	switch (topology_mode) {
	case TOPOLOGY_MODE_HW:
		while (info) {
			if (cpumask_test_cpu(cpu, &info->mask)) {
				cpumask_copy(&mask, &info->mask);
				break;
			}
			info = info->next;
		}
		if (cpumask_empty(&mask))
			cpumask_copy(&mask, cpumask_of(cpu));
		break;
	case TOPOLOGY_MODE_PACKAGE:
		cpumask_copy(&mask, cpu_present_mask);
		break;
	default:
		fallthrough;
	case TOPOLOGY_MODE_SINGLE:
		cpumask_copy(&mask, cpumask_of(cpu));
		break;
	}
	cpumask_and(&mask, &mask, cpu_online_mask);
	cpumask_copy(dst, &mask);
}

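/* Copy the mask of all online hardware threads of @cpu's core to @dst. */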
static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
{
	static cpumask_t mask;
	int i;

	cpumask_copy(&mask, cpumask_of(cpu));
	if (topology_mode != TOPOLOGY_MODE_HW)
		goto out;
	cpu -= cpu % (smp_cpu_mtid + 1);
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_present(cpu + i))
			cpumask_set_cpu(cpu + i, &mask);
	cpumask_and(&mask, &mask, cpu_online_mask);
out:
	cpumask_copy(dst, &mask);
}

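/*
 * Add all cpus described by one topology_core entry to the given drawer,
 * book and socket masks and fill in the per-cpu topology ids. The core
 * mask bits of the entry are numbered in reverse, hence the bit position
 * is mirrored before the core origin is added (rcore).
 */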
#define TOPOLOGY_CORE_BITS	64

static void add_cpus_to_mask(struct topology_core *tl_core,
			     struct mask_info *drawer,
			     struct mask_info *book,
			     struct mask_info *socket)
{
	struct cpu_topology_s390 *topo;
	unsigned int core;

	for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) {
		unsigned int rcore;
		int lcpu, i;

		rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
		lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
		if (lcpu < 0)
			continue;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			topo = &cpu_topology[lcpu + i];
			topo->drawer_id = drawer->id;
			topo->book_id = book->id;
			topo->socket_id = socket->id;
			topo->core_id = rcore;
			topo->thread_id = lcpu + i;
			topo->dedicated = tl_core->d;
			cpumask_set_cpu(lcpu + i, &drawer->mask);
			cpumask_set_cpu(lcpu + i, &book->mask);
			cpumask_set_cpu(lcpu + i, &socket->mask);
			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
		}
	}
}

static void clear_masks(void)
{
	struct mask_info *info;

	for (info = &socket_info; info; info = info->next)
		cpumask_clear(&info->mask);
	for (info = &book_info; info; info = info->next)
		cpumask_clear(&info->mask);
	for (info = &drawer_info; info; info = info->next)
		cpumask_clear(&info->mask);
}

static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_core *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

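/*
 * Walk the SYSIB 15.1.x topology list and (re)build the container masks.
 * Nesting levels: 3 = drawer, 2 = book, 1 = socket, 0 = core.
 */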
static void tl_to_masks(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	struct mask_info *drawer = &drawer_info;
	union topology_entry *tle, *end;

	clear_masks();
	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 3:
			drawer = drawer->next;
			drawer->id = tle->container.id;
			break;
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			socket = socket->next;
			socket->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, drawer, book, socket);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
}

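/*
 * Execute the perform-topology-function instruction (opcode 0xb9a2) with
 * the given function code and return its condition code.
 */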
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc)
		: "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

void update_cpu_masks(void)
{
	struct cpu_topology_s390 *topo, *topo_package, *topo_sibling;
	int cpu, sibling, pkg_first, smt_first, id;

	for_each_possible_cpu(cpu) {
		topo = &cpu_topology[cpu];
		cpu_thread_map(&topo->thread_mask, cpu);
		cpu_group_map(&topo->core_mask, &socket_info, cpu);
		cpu_group_map(&topo->book_mask, &book_info, cpu);
		cpu_group_map(&topo->drawer_mask, &drawer_info, cpu);
		topo->booted_cores = 0;
		if (topology_mode != TOPOLOGY_MODE_HW) {
			id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
			topo->thread_id = cpu;
			topo->core_id = cpu;
			topo->socket_id = id;
			topo->book_id = id;
			topo->drawer_id = id;
		}
	}
	for_each_online_cpu(cpu) {
		topo = &cpu_topology[cpu];
		pkg_first = cpumask_first(&topo->core_mask);
		topo_package = &cpu_topology[pkg_first];
		if (cpu == pkg_first) {
			for_each_cpu(sibling, &topo->core_mask) {
				topo_sibling = &cpu_topology[sibling];
				smt_first = cpumask_first(&topo_sibling->thread_mask);
				if (sibling == smt_first)
					topo_package->booted_cores++;
			}
		} else {
			topo->booted_cores = topo_package->booted_cores;
		}
	}
}

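/* Store the machine topology (SYSIB 15.1.x) up to the supported nesting level. */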
void store_topology(struct sysinfo_15_1_x *info)
{
	stsi(info, 15, 1, topology_mnest_limit());
}

static void __arch_update_dedicated_flag(void *arg)
{
	if (topology_cpu_dedicated(smp_processor_id()))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
}

static int __arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	int rc = 0;

	mutex_lock(&smp_cpu_state_mutex);
	if (MACHINE_HAS_TOPOLOGY) {
		rc = 1;
		store_topology(info);
		tl_to_masks(info);
	}
	update_cpu_masks();
	if (!MACHINE_HAS_TOPOLOGY)
		topology_update_polarization_simple();
	mutex_unlock(&smp_cpu_state_mutex);
	return rc;
}

int arch_update_cpu_topology(void)
{
	struct device *dev;
	int cpu, rc;

	rc = __arch_update_cpu_topology();
	on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return rc;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_flush_work(void)
{
	flush_work(&topology_work);
}

static void topology_timer_fn(struct timer_list *unused)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer;

static atomic_t topology_poll = ATOMIC_INIT(0);

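/*
 * Poll every 100 msec while topology changes are expected (topology_poll
 * is positive), otherwise once per minute.
 */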
static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + msecs_to_jiffies(100));
	else
		mod_timer(&topology_timer, jiffies + msecs_to_jiffies(60 * MSEC_PER_SEC));
}

void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

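/*
 * The "dispatching" attribute on the cpu subsystem root selects horizontal
 * (0) or vertical (1) polarization via topology_set_cpu_management().
 */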
static int cpu_management;

static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	cpus_read_lock();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	cpus_read_unlock();
	return rc ? rc : count;
}
static DEVICE_ATTR_RW(dispatching);

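/* Per-cpu "polarization" attribute: horizontal or vertical:{low,medium,high}. */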
static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

static ssize_t cpu_dedicated_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", topology_cpu_dedicated(cpu));
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(dedicated, 0444, cpu_dedicated_show, NULL);

static struct attribute *topology_extra_cpu_attrs[] = {
	&dev_attr_dedicated.attr,
	NULL,
};

static struct attribute_group topology_extra_cpu_attr_group = {
	.attrs = topology_extra_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	int rc;

	rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	if (rc || !MACHINE_HAS_TOPOLOGY)
		return rc;
	rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
	if (rc)
		sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	return rc;
}

static const struct cpumask *cpu_thread_mask(int cpu)
{
	return &cpu_topology[cpu].thread_mask;
}

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_mask;
}

static const struct cpumask *cpu_book_mask(int cpu)
{
	return &cpu_topology[cpu].book_mask;
}

static const struct cpumask *cpu_drawer_mask(int cpu)
{
	return &cpu_topology[cpu].drawer_mask;
}

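/*
 * Scheduling domain hierarchy, from smallest to largest grouping:
 * SMT (threads) -> MC (socket) -> BOOK -> DRAWER -> DIE (machine).
 */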
static struct sched_domain_topology_level s390_topology[] = {
	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
	{ cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

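/*
 * Allocate one mask_info node for every container that can exist at the
 * given nesting level; the count is the product of the magnitude values
 * above that level.
 */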
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = memblock_alloc(sizeof(*mask->next), 8);
		if (!mask->next)
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(*mask->next), 8);
		mask = mask->next;
	}
}

void __init topology_init_early(void)
{
	struct sysinfo_15_1_x *info;

	set_sched_topology(s390_topology);
	if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) {
		if (MACHINE_HAS_TOPOLOGY)
			topology_mode = TOPOLOGY_MODE_HW;
		else
			topology_mode = TOPOLOGY_MODE_SINGLE;
	}
	if (!MACHINE_HAS_TOPOLOGY)
		goto out;
	tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!tl_info)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
		info->mag[0], info->mag[1], info->mag[2], info->mag[3],
		info->mag[4], info->mag[5], info->mnest);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
	alloc_masks(info, &drawer_info, 3);
out:
	__arch_update_cpu_topology();
	__arch_update_dedicated_flag(NULL);
}

static inline int topology_get_mode(int enabled)
{
	if (!enabled)
		return TOPOLOGY_MODE_SINGLE;
	return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE;
}

static inline int topology_is_enabled(void)
{
	return topology_mode != TOPOLOGY_MODE_SINGLE;
}

static int __init topology_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (rc)
		return rc;
	topology_mode = topology_get_mode(enabled);
	return 0;
}
early_param("topology", topology_setup);

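/* Handler for /proc/sys/s390/topology: toggle topology support at runtime. */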
static int topology_ctl_handler(struct ctl_table *ctl, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	int enabled = topology_is_enabled();
	int new_mode;
	int rc;
	struct ctl_table ctl_entry = {
		.procname	= ctl->procname,
		.data		= &enabled,
		.maxlen		= sizeof(int),
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	};

	rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
	if (rc < 0 || !write)
		return rc;

	mutex_lock(&smp_cpu_state_mutex);
	new_mode = topology_get_mode(enabled);
	if (topology_mode != new_mode) {
		topology_mode = new_mode;
		topology_schedule_update();
	}
	mutex_unlock(&smp_cpu_state_mutex);
	topology_flush_work();

	return rc;
}

static struct ctl_table topology_ctl_table[] = {
	{
		.procname	= "topology",
		.mode		= 0644,
		.proc_handler	= topology_ctl_handler,
	},
	{ },
};

static struct ctl_table topology_dir_table[] = {
	{
		.procname	= "s390",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= topology_ctl_table,
	},
	{ },
};

static int __init topology_init(void)
{
	timer_setup(&topology_timer, topology_timer_fn, TIMER_DEFERRABLE);
	if (MACHINE_HAS_TOPOLOGY)
		set_topology_timer();
	else
		topology_update_polarization_simple();
	register_sysctl_table(topology_dir_table);
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);