// SPDX-License-Identifier: GPL-2.0
/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/irq_work.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/topology.h>
#include <asm/vdso.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
	ec_mcck_pending,
	ec_irq_work,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;
cpumask_t cpu_setup_mask;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 status;

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

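/*
 * Request an ec_xxx event on a cpu: set the event bit in its pcpu mask
 * and kick the cpu with an external call if it is running, or an
 * emergency signal if it is waiting.
 */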
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}

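/*
 * Allocate and initialize the lowcore, the nodat/async/machine check
 * stacks and the per-cpu NMI save areas for a cpu that is about to be
 * brought up, and make the new lowcore known via SIGP set-prefix.
 */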
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc;

	lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	async_stack = stack_alloc();
	mcck_stack = stack_alloc();
	if (!lc || !nodat_stack || !async_stack || !mcck_stack)
		goto out;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + STACK_INIT_OFFSET;
	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
	lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	lc->preempt_count = PREEMPT_DISABLED;
	if (nmi_alloc_per_cpu(lc))
		goto out;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;

out:
	stack_free(mcck_stack);
	stack_free(async_stack);
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages((unsigned long) lc, LC_ORDER);
	return -ENOMEM;
}

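/*
 * Undo pcpu_alloc_lowcore(): detach the lowcore from the hardware via
 * SIGP set-prefix 0 and free the stacks, the NMI save areas and the
 * lowcore itself.
 */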
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc;
	int cpu;

	cpu = pcpu - pcpu_devices;
	lc = lowcore_ptr[cpu];
	nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
	async_stack = lc->async_stack - STACK_INIT_OFFSET;
	mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[cpu] = NULL;
	nmi_free_per_cpu(lc);
	stack_free(async_stack);
	stack_free(mcck_stack);
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages((unsigned long) lc, LC_ORDER);
}

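/*
 * Initialize the lowcore of a secondary cpu shortly before it is
 * started: kernel and user ASCEs, per-cpu offset, spinlock value, cpu
 * timer fields and the control register save area.
 */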
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc = lowcore_ptr[cpu];

	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->restart_flags = RESTART_FLAG_CTLREGS;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->user_asce = s390_invalid_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer =
		lc->steal_timer = lc->avg_steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	lc->cregs_save_area[1] = lc->kernel_asce;
	lc->cregs_save_area[7] = lc->user_asce;
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	arch_spin_lock_setup(cpu);
}

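/*
 * Attach the task that will become "current" on the new cpu: point the
 * lowcore at its kernel stack and copy its cpu timer accounting fields.
 */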
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct lowcore *lc;
	int cpu;

	cpu = pcpu - pcpu_devices;
	lc = lowcore_ptr[cpu];
	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long) tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = tsk->thread.user_timer;
	lc->guest_timer = tsk->thread.guest_timer;
	lc->system_timer = tsk->thread.system_timer;
	lc->hardirq_timer = tsk->thread.hardirq_timer;
	lc->softirq_timer = tsk->thread.softirq_timer;
	lc->steal_timer = 0;
}

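/*
 * Let the target cpu start executing func(data) on its restart stack
 * by means of a SIGP restart.
 */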
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct lowcore *lc;
	int cpu;

	cpu = pcpu - pcpu_devices;
	lc = lowcore_ptr[cpu];
	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1U;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

typedef void (pcpu_delegate_fn)(void *);

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
{
	func(data);	/* should not return */
}

static void pcpu_delegate(struct pcpu *pcpu,
			  pcpu_delegate_fn *func,
			  void *data, unsigned long stack)
{
	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned int source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	if (pcpu->address == source_cpu) {
		call_on_stack(2, stack, void, __pcpu_delegate,
			      pcpu_delegate_fn *, func, void *, data);
	}
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	if (lc) {
		lc->restart_stack = stack;
		lc->restart_fn = (unsigned long)func;
		lc->restart_data = (unsigned long)data;
		lc->restart_source = source_cpu;
	} else {
		mem_assign_absolute(lc->restart_stack, stack);
		mem_assign_absolute(lc->restart_fn, (unsigned long)func);
		mem_assign_absolute(lc->restart_data, (unsigned long)data);
		mem_assign_absolute(lc->restart_source, source_cpu);
	}
	__bpon();
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc == SIGP_CC_ORDER_CODE_ACCEPTED) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	struct lowcore *lc = lowcore_ptr[0];

	if (pcpu_devices[0].address == stap())
		lc = &S390_lowcore;

	pcpu_delegate(&pcpu_devices[0], func, data,
		      lc->nodat_stack);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

void schedule_mcck_handler(void)
{
	pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_mcck_pending);
}

bool notrace arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(pcpu_devices + cpu))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

void notrace smp_yield_cpu(int cpu)
{
	if (!MACHINE_HAS_DIAG9C)
		return;
	diag_stat_inc_norecursion(DIAG_STAT_X09C);
	asm volatile("diag %0,0,0x9c"
		     : : "d" (pcpu_devices[cpu].address));
}
EXPORT_SYMBOL_GPL(smp_yield_cpu);

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void notrace smp_emergency_stop(void)
{
	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
	static cpumask_t cpumask;
	u64 end;
	int cpu;

	arch_spin_lock(&lock);
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, &cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			break;
		cpu_relax();
	}
	arch_spin_unlock(&lock);
}
NOKPROBE_SYMBOL(smp_emergency_stop);

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();

	if (oops_in_progress)
		smp_emergency_stop();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu_devices + cpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
	if (test_bit(ec_mcck_pending, &bits))
		__s390_handle_mcck();
	if (test_bit(ec_irq_work, &bits))
		irq_work_run();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_irq_work);
}
#endif

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

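/*
 * ctl_lock serializes updates of the control register save area in the
 * absolute lowcore, which is kept in sync so that e.g. a restarted cpu
 * picks up the current control register contents.
 */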
static DEFINE_SPINLOCK(ctl_lock);
static unsigned long ctlreg;

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	spin_lock(&ctl_lock);
	memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
	__set_bit(bit, &ctlreg);
	memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
	spin_unlock(&ctl_lock);
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	spin_lock(&ctl_lock);
	memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
	__clear_bit(bit, &ctlreg);
	memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
	spin_unlock(&ctl_lock);
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

#ifdef CONFIG_CRASH_DUMP

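/*
 * Store the register state of a stopped cpu into its lowcore save
 * areas, including the vector or guarded-storage extended state if the
 * machine provides it. Used when collecting state for a crash dump.
 */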
int smp_store_status(int cpu)
{
	struct lowcore *lc;
	struct pcpu *pcpu;
	unsigned long pa;

	pcpu = pcpu_devices + cpu;
	lc = lowcore_ptr[cpu];
	pa = __pa(&lc->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
		return 0;
	pa = __pa(lc->mcesad & MCESA_ORIGIN_MASK);
	if (MACHINE_HAS_GS)
		pa |= lc->mcesad & MCESA_LC_MASK;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp/nvme dump
 *    condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	__vector128 *vxrs = (__vector128 *) page;

	if (is_boot_cpu)
		vxrs = boot_cpu_vector_save_area;
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
	save_area_add_vxrs(sa, vxrs);
}

static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	void *regs = (void *) page;

	if (is_boot_cpu)
		copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512);
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
	save_area_add_regs(sa, regs);
}

void __init smp_save_dump_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	unsigned long page;
	bool is_boot_cpu;

	if (!(oldmem_data.start || is_ipl_type_dump()))
		/* No previous system present, normal boot. */
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
	if (!page)
		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
		      PAGE_SIZE, 1UL << 31);

	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		is_boot_cpu = (addr == boot_cpu_addr);
		/* Allocate save area */
		sa = save_area_alloc(is_boot_cpu);
		if (!sa)
			panic("could not allocate memory for save area\n");
		/* Get the vector registers */
		smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
		/*
		 * For a zfcp/nvme dump OLDMEM_BASE == NULL and the registers
		 * of the boot CPU are stored in the HSA. To retrieve
		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
		 */
		if (!is_boot_cpu || oldmem_data.start)
			/* Get the CPU registers */
			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
	}
	memblock_free(page, PAGE_SIZE);
	diag_amode31_ops.diag308_reset();
	pcpu_set_smt(0);
}

#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

int smp_cpu_get_cpu_address(int cpu)
{
	return pcpu_devices[cpu].address;
}

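/*
 * Get the core topology from the SCLP layer; if that fails (or has
 * failed once), fall back to probing all possible cpu addresses with
 * SIGP sense.
 */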
static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
}

static int smp_add_present_cpu(int cpu);

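/*
 * Assign logical cpu numbers to all threads of a core and mark the
 * cpus present. Returns the number of cpus that were added.
 */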
static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
			bool configured, bool early)
{
	struct pcpu *pcpu;
	int cpu, nr, i;
	u16 address;

	nr = 0;
	if (sclp.has_core_type && core->type != boot_core_type)
		return nr;
	cpu = cpumask_first(avail);
	address = core->core_id << smp_cpu_mt_shift;
	for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
		if (pcpu_find_address(cpu_present_mask, address + i))
			continue;
		pcpu = pcpu_devices + cpu;
		pcpu->address = address + i;
		if (configured)
			pcpu->state = CPU_STATE_CONFIGURED;
		else
			pcpu->state = CPU_STATE_STANDBY;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		set_cpu_present(cpu, true);
		if (!early && smp_add_present_cpu(cpu) != 0)
			set_cpu_present(cpu, false);
		else
			nr++;
		cpumask_clear_cpu(cpu, avail);
		cpu = cpumask_next(cpu, avail);
	}
	return nr;
}

static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
	struct sclp_core_entry *core;
	static cpumask_t avail;
	bool configured;
	u16 core_id;
	int nr, i;

	mutex_lock(&smp_cpu_state_mutex);
	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	/*
	 * Add IPL core first (which got logical CPU number 0) to make sure
	 * that all SMT threads get subsequent logical CPU numbers.
	 */
	if (early) {
		core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
		for (i = 0; i < info->configured; i++) {
			core = &info->core[i];
			if (core->core_id == core_id) {
				nr += smp_add_core(core, &avail, true, early);
				break;
			}
		}
	}
	for (i = 0; i < info->combined; i++) {
		configured = i < info->configured;
		nr += smp_add_core(&info->core[i], &avail, configured, early);
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return nr;
}

void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_alloc(sizeof(*info), 8);
	if (!info)
		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
		      __func__, sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	__smp_rescan_cpus(info, true);
	memblock_free_early((unsigned long)info, sizeof(*info));
}

/*
 *	Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	int cpu = raw_smp_processor_id();

	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long)restart_stack;
	S390_lowcore.restart_fn = (unsigned long)do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1U;
	S390_lowcore.restart_flags = 0;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	cpu_init();
	rcu_cpu_starting(cpu);
	init_cpu_timer();
	vtime_init();
	vdso_getcpu_init();
	pfault_init();
	cpumask_set_cpu(cpu, &cpu_setup_mask);
	update_cpu_masks();
	notify_cpu_starting(cpu);
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(cpu, true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	int rc;

	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

int __cpu_disable(void)
{
	unsigned long cregs[16];
	int cpu;

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	cpu = smp_processor_id();
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, &cpu_setup_mask);
	update_cpu_masks();
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	__bpon();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

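/*
 * Compute the number of possible cpus: the smaller of the SCLP derived
 * limit (cores times threads per core) and the possible_cpus= kernel
 * parameter, capped by nr_cpu_ids.
 */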
void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	pcpu->state = CPU_STATE_CONFIGURED;
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}

void __init smp_setup_processor_id(void)
{
	pcpu_devices[0].address = stap();
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
	S390_lowcore.spinlock_index = 0;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	cpus_read_lock();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	cpus_read_unlock();
	return rc ? rc : count;
}

static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
	&dev_attr_configure.attr,
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_online(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}

static int smp_cpu_pre_down(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return 0;
}

static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
	unregister_cpu(c);
out:
	return rc;
}

int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	nr = __smp_rescan_cpus(info, false);
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = lock_device_hotplug_sysfs();
	if (rc)
		return rc;
	rc = smp_rescan_cpus();
	unlock_device_hotplug();
	return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
out:
	return rc;
}
subsys_initcall(s390_smp_init);

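/*
 * Copy the current lowcore to a new location and switch the prefix
 * register to it, so that the IPL cpu can move off the early lowcore
 * it was started with.
 */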
static __always_inline void set_new_lowcore(struct lowcore *lc)
{
	union register_pair dst, src;
	u32 pfx;

	src.even = (unsigned long) &S390_lowcore;
	src.odd = sizeof(S390_lowcore);
	dst.even = (unsigned long) lc;
	dst.odd = sizeof(*lc);
	pfx = (unsigned long) lc;

	asm volatile(
		"	mvcl	%[dst],%[src]\n"
		"	spx	%[pfx]\n"
		: [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
		: [pfx] "Q" (pfx)
		: "memory", "cc");
}

static int __init smp_reinit_ipl_cpu(void)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc, *lc_ipl;
	unsigned long flags;

	lc_ipl = lowcore_ptr[0];
	lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	async_stack = stack_alloc();
	mcck_stack = stack_alloc();
	if (!lc || !nodat_stack || !async_stack || !mcck_stack)
		panic("Couldn't allocate memory");

	local_irq_save(flags);
	local_mcck_disable();
	set_new_lowcore(lc);
	S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
	S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
	S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
	lowcore_ptr[0] = lc;
	local_mcck_enable();
	local_irq_restore(flags);

	free_pages(lc_ipl->async_stack - STACK_INIT_OFFSET, THREAD_SIZE_ORDER);
	memblock_free_late(lc_ipl->mcck_stack - STACK_INIT_OFFSET, THREAD_SIZE);
	memblock_free_late((unsigned long) lc_ipl, sizeof(*lc_ipl));

	return 0;
}
early_initcall(smp_reinit_ipl_cpu);