1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright 2015 IBM Corp.
8 #include <linux/types.h>
10 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/cpu.h>
15 #include <asm/asm-prototypes.h>
16 #include <asm/firmware.h>
17 #include <asm/machdep.h>
19 #include <asm/cputhreads.h>
20 #include <asm/cpuidle.h>
21 #include <asm/code-patching.h>
23 #include <asm/runlatch.h>
24 #include <asm/dbell.h>
29 /* Power ISA 3.0 allows for stop states 0x0 - 0xF */
30 #define MAX_STOP_STATE 0xF
32 #define P9_STOP_SPR_MSR 2000
33 #define P9_STOP_SPR_PSSCR 855
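/*
 * Register identifiers handed to the OPAL stop-api (opal_slw_set_reg()
 * below) so firmware can restore the MSR and PSSCR images on wakeup from
 * deep stop states. The MSR is not an SPR, so it gets a firmware-defined
 * number here.
 */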
35 static u32 supported_cpuidle_states;
36 struct pnv_idle_states_t *pnv_idle_states;
37 int nr_pnv_idle_states;
40 * The default stop state that will be used by ppc_md.power_save
41 * function on platforms that support stop instruction.
43 static u64 pnv_default_stop_val;
44 static u64 pnv_default_stop_mask;
45 static bool default_stop_found;
48 * First stop state levels when SPR and TB loss can occur.
50 static u64 pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
51 static u64 pnv_first_spr_loss_level = MAX_STOP_STATE + 1;
54 * psscr value and mask of the deepest stop idle state.
55 * Used when a cpu is offlined.
57 static u64 pnv_deepest_stop_psscr_val;
58 static u64 pnv_deepest_stop_psscr_mask;
59 static u64 pnv_deepest_stop_flag;
60 static bool deepest_stop_found;
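/*
 * Idle instruction type (PNV_THREAD_NAP/SLEEP/WINKLE) used when a POWER7/8
 * CPU is offlined; chosen in pnv_init_idle_states() from the OPAL_PM_*
 * flags reported by firmware.
 */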
62 static unsigned long power7_offline_type;
64 static int pnv_save_sprs_for_deep_states(void)
70 * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
71 * all cpus at boot. Get these reg values of current cpu and use the
72 * same across all cpus.
74 uint64_t lpcr_val = mfspr(SPRN_LPCR);
75 uint64_t hid0_val = mfspr(SPRN_HID0);
76 uint64_t hid1_val = mfspr(SPRN_HID1);
77 uint64_t hid4_val = mfspr(SPRN_HID4);
78 uint64_t hid5_val = mfspr(SPRN_HID5);
79 uint64_t hmeer_val = mfspr(SPRN_HMEER);
80 uint64_t msr_val = MSR_IDLE;
81 uint64_t psscr_val = pnv_deepest_stop_psscr_val;
83 for_each_present_cpu(cpu) {
84 uint64_t pir = get_hard_smp_processor_id(cpu);
85 uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu];
87 rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
91 rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
95 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
96 rc = opal_slw_set_reg(pir, P9_STOP_SPR_MSR, msr_val);
100 rc = opal_slw_set_reg(pir,
101 P9_STOP_SPR_PSSCR, psscr_val);
107 /* HIDs are per core registers */
108 if (cpu_thread_in_core(cpu) == 0) {
110 rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
114 rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
/* Only P8 needs to set extra HID registers */
119 if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
121 rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
125 rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
129 rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
139 u32 pnv_get_supported_cpuidle_states(void)
141 return supported_cpuidle_states;
143 EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
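/*
 * The mask returned above is the OR of the OPAL_PM_* flags of every idle
 * state found in the device tree (see pnv_probe_idle_states()). Callers
 * such as the cpuidle-powernv driver test individual bits, e.g.
 * pnv_get_supported_cpuidle_states() & OPAL_PM_NAP_ENABLED, to decide
 * which states to expose.
 */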
145 static void pnv_fastsleep_workaround_apply(void *info)
151 rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
152 OPAL_CONFIG_IDLE_APPLY);
157 static bool power7_fastsleep_workaround_entry = true;
158 static bool power7_fastsleep_workaround_exit = true;
161 * Used to store fastsleep workaround state
162 * 0 - Workaround applied/undone at fastsleep entry/exit path (Default)
163 * 1 - Workaround applied once, never undone.
165 static u8 fastsleep_workaround_applyonce;
167 static ssize_t show_fastsleep_workaround_applyonce(struct device *dev,
168 struct device_attribute *attr, char *buf)
170 return sprintf(buf, "%u\n", fastsleep_workaround_applyonce);
173 static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
177 cpumask_t primary_thread_mask;
181 if (kstrtou8(buf, 0, &val) || val != 1)
184 if (fastsleep_workaround_applyonce == 1)
188 * fastsleep_workaround_applyonce = 1 implies
189 * fastsleep workaround needs to be left in 'applied' state on all
190 * the cores. Do this by-
191 * 1. Disable the 'undo' workaround in fastsleep exit path
* 2. Send IPIs to all the cores which have at least one online thread
193 * 3. Disable the 'apply' workaround in fastsleep entry path
195 * There is no need to send ipi to cores which have all threads
196 * offlined, as last thread of the core entering fastsleep or deeper
197 * state would have applied workaround.
199 power7_fastsleep_workaround_exit = false;
202 primary_thread_mask = cpu_online_cores_map();
203 on_each_cpu_mask(&primary_thread_mask,
pnv_fastsleep_workaround_apply, NULL, 1);
pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply\n");
212 power7_fastsleep_workaround_entry = false;
214 fastsleep_workaround_applyonce = 1;
221 static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
222 show_fastsleep_workaround_applyonce,
223 store_fastsleep_workaround_applyonce);
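/*
 * Once this attribute is registered against cpu_subsys in
 * pnv_init_idle_states() below, the workaround can be made permanent from
 * userspace; assuming the standard cpu sysfs layout, that is:
 *
 *	echo 1 > /sys/devices/system/cpu/fastsleep_workaround_applyonce
 *
 * Only the value 1 is accepted, and the change cannot be undone at runtime.
 */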
225 static inline void atomic_start_thread_idle(void)
227 int cpu = raw_smp_processor_id();
228 int first = cpu_first_thread_sibling(cpu);
229 int thread_nr = cpu_thread_in_core(cpu);
230 unsigned long *state = &paca_ptrs[first]->idle_state;
232 clear_bit(thread_nr, state);
235 static inline void atomic_stop_thread_idle(void)
237 int cpu = raw_smp_processor_id();
238 int first = cpu_first_thread_sibling(cpu);
239 int thread_nr = cpu_thread_in_core(cpu);
240 unsigned long *state = &paca_ptrs[first]->idle_state;
242 set_bit(thread_nr, state);
245 static inline void atomic_lock_thread_idle(void)
247 int cpu = raw_smp_processor_id();
248 int first = cpu_first_thread_sibling(cpu);
249 unsigned long *state = &paca_ptrs[first]->idle_state;
251 while (unlikely(test_and_set_bit_lock(NR_PNV_CORE_IDLE_LOCK_BIT, state)))
255 static inline void atomic_unlock_and_stop_thread_idle(void)
257 int cpu = raw_smp_processor_id();
258 int first = cpu_first_thread_sibling(cpu);
259 unsigned long thread = 1UL << cpu_thread_in_core(cpu);
260 unsigned long *state = &paca_ptrs[first]->idle_state;
261 u64 s = READ_ONCE(*state);
264 BUG_ON(!(s & PNV_CORE_IDLE_LOCK_BIT));
268 new = (s | thread) & ~PNV_CORE_IDLE_LOCK_BIT;
269 tmp = cmpxchg(state, s, new);
270 if (unlikely(tmp != s)) {
276 static inline void atomic_unlock_thread_idle(void)
278 int cpu = raw_smp_processor_id();
279 int first = cpu_first_thread_sibling(cpu);
280 unsigned long *state = &paca_ptrs[first]->idle_state;
282 BUG_ON(!test_bit(NR_PNV_CORE_IDLE_LOCK_BIT, state));
283 clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, state);
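/*
 * Reading aid for the per-core paca->idle_state word manipulated by the
 * helpers above (derived from the bit names used in this file):
 *
 *  - one bit per hardware thread, cleared while that thread is in an idle
 *    state (atomic_start_thread_idle()) and set again on exit
 *    (atomic_stop_thread_idle()); a core whose thread bits are all clear
 *    is completely idle;
 *  - PNV_CORE_IDLE_LOCK_BIT, taken with test_and_set_bit_lock(), serialises
 *    updates of the core-shared fields;
 *  - the winkle count/thread bits are used further down to tell a full-core
 *    winkle (full state loss) apart from fast sleep on wakeup.
 */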
305 /* per thread SPRs that get lost in shallow states */
312 static unsigned long power7_idle_insn(unsigned long type)
314 int cpu = raw_smp_processor_id();
315 int first = cpu_first_thread_sibling(cpu);
316 unsigned long *state = &paca_ptrs[first]->idle_state;
317 unsigned long thread = 1UL << cpu_thread_in_core(cpu);
318 unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
struct p7_sprs sprs = {}; /* avoid false used-uninitialised */
322 bool sprs_saved = false;
325 if (unlikely(type != PNV_THREAD_NAP)) {
326 atomic_lock_thread_idle();
328 BUG_ON(!(*state & thread));
331 if (power7_fastsleep_workaround_entry) {
332 if ((*state & core_thread_mask) == 0) {
333 rc = opal_config_cpu_idle_state(
334 OPAL_CONFIG_IDLE_FASTSLEEP,
335 OPAL_CONFIG_IDLE_APPLY);
340 if (type == PNV_THREAD_WINKLE) {
341 sprs.tscr = mfspr(SPRN_TSCR);
342 sprs.worc = mfspr(SPRN_WORC);
344 sprs.sdr1 = mfspr(SPRN_SDR1);
345 sprs.rpr = mfspr(SPRN_RPR);
347 sprs.lpcr = mfspr(SPRN_LPCR);
348 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
349 sprs.hfscr = mfspr(SPRN_HFSCR);
350 sprs.fscr = mfspr(SPRN_FSCR);
352 sprs.purr = mfspr(SPRN_PURR);
353 sprs.spurr = mfspr(SPRN_SPURR);
354 sprs.dscr = mfspr(SPRN_DSCR);
355 sprs.wort = mfspr(SPRN_WORT);
360 * Increment winkle counter and set all winkle bits if
361 * all threads are winkling. This allows wakeup side to
362 * distinguish between fast sleep and winkle state
363 * loss. Fast sleep still has to resync the timebase so
364 * this may not be a really big win.
366 *state += 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
367 if ((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS)
>> PNV_CORE_IDLE_WINKLE_COUNT_SHIFT == threads_per_core)
370 *state |= PNV_CORE_IDLE_THREAD_WINKLE_BITS;
371 WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
374 atomic_unlock_thread_idle();
377 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
378 sprs.amr = mfspr(SPRN_AMR);
379 sprs.iamr = mfspr(SPRN_IAMR);
380 sprs.amor = mfspr(SPRN_AMOR);
381 sprs.uamor = mfspr(SPRN_UAMOR);
384 local_paca->thread_idle_state = type;
385 srr1 = isa206_idle_insn_mayloss(type); /* go idle */
386 local_paca->thread_idle_state = PNV_THREAD_RUNNING;
389 WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));
391 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
392 if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
394 * We don't need an isync after the mtsprs here because
395 * the upcoming mtmsrd is execution synchronizing.
397 mtspr(SPRN_AMR, sprs.amr);
398 mtspr(SPRN_IAMR, sprs.iamr);
399 mtspr(SPRN_AMOR, sprs.amor);
400 mtspr(SPRN_UAMOR, sprs.uamor);
404 if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
405 hmi_exception_realmode(NULL);
407 if (likely((srr1 & SRR1_WAKESTATE) != SRR1_WS_HVLOSS)) {
408 if (unlikely(type != PNV_THREAD_NAP)) {
409 atomic_lock_thread_idle();
410 if (type == PNV_THREAD_WINKLE) {
411 WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
412 *state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
413 *state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
415 atomic_unlock_and_stop_thread_idle();
421 BUG_ON(type == PNV_THREAD_NAP);
423 atomic_lock_thread_idle();
426 if (type == PNV_THREAD_WINKLE) {
427 WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
428 *state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
429 if (*state & (thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT)) {
430 *state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
436 WARN_ON(*state & thread);
438 if ((*state & core_thread_mask) != 0)
443 mtspr(SPRN_TSCR, sprs.tscr);
444 mtspr(SPRN_WORC, sprs.worc);
447 if (power7_fastsleep_workaround_exit) {
448 rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
449 OPAL_CONFIG_IDLE_UNDO);
454 if (opal_resync_timebase() != OPAL_SUCCESS)
461 if ((*state & local_paca->subcore_sibling_mask) != 0)
464 /* Per-subcore SPRs */
465 mtspr(SPRN_SDR1, sprs.sdr1);
466 mtspr(SPRN_RPR, sprs.rpr);
470 * isync after restoring shared SPRs and before unlocking. Unlock
* only contains hwsync which does not necessarily do the right thing for SPRs.
475 atomic_unlock_and_stop_thread_idle();
477 /* Fast sleep does not lose SPRs */
481 /* Per-thread SPRs */
482 mtspr(SPRN_LPCR, sprs.lpcr);
483 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
484 mtspr(SPRN_HFSCR, sprs.hfscr);
485 mtspr(SPRN_FSCR, sprs.fscr);
487 mtspr(SPRN_PURR, sprs.purr);
488 mtspr(SPRN_SPURR, sprs.spurr);
489 mtspr(SPRN_DSCR, sprs.dscr);
490 mtspr(SPRN_WORT, sprs.wort);
492 mtspr(SPRN_SPRG3, local_paca->sprg_vdso);
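/*
 * SPRG3 above is the user-readable SPR the vDSO relies on (the paca caches
 * its value in sprg_vdso), so it is restored here along with the other
 * per-thread state lost in winkle.
 */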
495 * The SLB has to be restored here, but it sometimes still
* contains entries, so the __ variant must be used to prevent multi hits.
499 __slb_restore_bolted_realmode();
504 extern unsigned long idle_kvm_start_guest(unsigned long srr1);
506 #ifdef CONFIG_HOTPLUG_CPU
507 static unsigned long power7_offline(void)
513 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
514 /* Tell KVM we're entering idle. */
515 /******************************************************/
516 /* N O T E W E L L ! ! ! N O T E W E L L */
517 /* The following store to HSTATE_HWTHREAD_STATE(r13) */
518 /* MUST occur in real mode, i.e. with the MMU off, */
519 /* and the MMU must stay off until we clear this flag */
520 /* and test HSTATE_HWTHREAD_REQ(r13) in */
521 /* pnv_powersave_wakeup in this file. */
522 /* The reason is that another thread can switch the */
523 /* MMU to a guest context whenever this flag is set */
524 /* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */
525 /* that would potentially cause this thread to start */
526 /* executing instructions from guest memory in */
527 /* hypervisor mode, leading to a host crash or data */
528 /* corruption, or worse. */
529 /******************************************************/
530 local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
533 __ppc64_runlatch_off();
534 srr1 = power7_idle_insn(power7_offline_type);
535 __ppc64_runlatch_on();
537 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
538 local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
539 /* Order setting hwthread_state vs. testing hwthread_req */
541 if (local_paca->kvm_hstate.hwthread_req)
542 srr1 = idle_kvm_start_guest(srr1);
551 void power7_idle_type(unsigned long type)
555 if (!prep_irq_for_idle_irqsoff())
559 __ppc64_runlatch_off();
560 srr1 = power7_idle_insn(type);
561 __ppc64_runlatch_on();
564 fini_irq_for_idle_irqsoff();
565 irq_set_pending_from_srr1(srr1);
568 void power7_idle(void)
573 power7_idle_type(PNV_THREAD_NAP);
598 /* per thread SPRs that get lost in shallow states */
605 static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
607 int cpu = raw_smp_processor_id();
608 int first = cpu_first_thread_sibling(cpu);
609 unsigned long *state = &paca_ptrs[first]->idle_state;
610 unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
613 unsigned long mmcr0 = 0;
614 struct p9_sprs sprs = {}; /* avoid false used-uninitialised */
615 bool sprs_saved = false;
617 if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
623 * Wake synchronously. SRESET via xscom may still cause
624 * a 0x100 powersave wakeup with SRR1 reason!
626 srr1 = isa300_idle_stop_noloss(psscr); /* go idle */
631 * Registers not saved, can't recover!
632 * This would be a hardware bug
634 BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS);
640 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
641 if (cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG)) {
642 local_paca->requested_psscr = psscr;
643 /* order setting requested_psscr vs testing dont_stop */
645 if (atomic_read(&local_paca->dont_stop)) {
646 local_paca->requested_psscr = 0;
652 if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
654 * POWER9 DD2 can incorrectly set PMAO when waking up
655 * after a state-loss idle. Saving and restoring MMCR0
656 * over idle is a workaround.
658 mmcr0 = mfspr(SPRN_MMCR0);
660 if ((psscr & PSSCR_RL_MASK) >= pnv_first_spr_loss_level) {
661 sprs.lpcr = mfspr(SPRN_LPCR);
662 sprs.hfscr = mfspr(SPRN_HFSCR);
663 sprs.fscr = mfspr(SPRN_FSCR);
664 sprs.pid = mfspr(SPRN_PID);
665 sprs.purr = mfspr(SPRN_PURR);
666 sprs.spurr = mfspr(SPRN_SPURR);
667 sprs.dscr = mfspr(SPRN_DSCR);
668 sprs.wort = mfspr(SPRN_WORT);
670 sprs.mmcra = mfspr(SPRN_MMCRA);
671 sprs.mmcr0 = mfspr(SPRN_MMCR0);
672 sprs.mmcr1 = mfspr(SPRN_MMCR1);
673 sprs.mmcr2 = mfspr(SPRN_MMCR2);
675 sprs.ptcr = mfspr(SPRN_PTCR);
676 sprs.rpr = mfspr(SPRN_RPR);
677 sprs.tscr = mfspr(SPRN_TSCR);
678 if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR))
679 sprs.ldbar = mfspr(SPRN_LDBAR);
683 atomic_start_thread_idle();
686 sprs.amr = mfspr(SPRN_AMR);
687 sprs.iamr = mfspr(SPRN_IAMR);
688 sprs.amor = mfspr(SPRN_AMOR);
689 sprs.uamor = mfspr(SPRN_UAMOR);
691 srr1 = isa300_idle_stop_mayloss(psscr); /* go idle */
693 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
694 local_paca->requested_psscr = 0;
697 psscr = mfspr(SPRN_PSSCR);
700 WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));
702 if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
706 * We don't need an isync after the mtsprs here because the
707 * upcoming mtmsrd is execution synchronizing.
709 mtspr(SPRN_AMR, sprs.amr);
710 mtspr(SPRN_IAMR, sprs.iamr);
711 mtspr(SPRN_AMOR, sprs.amor);
712 mtspr(SPRN_UAMOR, sprs.uamor);
715 * Workaround for POWER9 DD2.0, if we lost resources, the ERAT
716 * might have been corrupted and needs flushing. We also need
717 * to reload MMCR0 (see mmcr0 comment above).
719 if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
720 asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT);
721 mtspr(SPRN_MMCR0, mmcr0);
725 * DD2.2 and earlier need to set then clear bit 60 in MMCRA
726 * to ensure the PMU starts running.
728 mmcra = mfspr(SPRN_MMCRA);
729 mmcra |= PPC_BIT(60);
730 mtspr(SPRN_MMCRA, mmcra);
731 mmcra &= ~PPC_BIT(60);
732 mtspr(SPRN_MMCRA, mmcra);
735 if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
736 hmi_exception_realmode(NULL);
739 * On POWER9, SRR1 bits do not match exactly as expected.
740 * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so
741 * just always test PSSCR for SPR/TB state loss.
743 pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
744 if (likely(pls < pnv_first_spr_loss_level)) {
746 atomic_stop_thread_idle();
753 atomic_lock_thread_idle();
755 if ((*state & core_thread_mask) != 0)
759 mtspr(SPRN_PTCR, sprs.ptcr);
760 mtspr(SPRN_RPR, sprs.rpr);
761 mtspr(SPRN_TSCR, sprs.tscr);
763 if (pls >= pnv_first_tb_loss_level) {
765 if (opal_resync_timebase() != OPAL_SUCCESS)
770 * isync after restoring shared SPRs and before unlocking. Unlock
* only contains hwsync which does not necessarily do the right thing for SPRs.
777 atomic_unlock_and_stop_thread_idle();
779 /* Per-thread SPRs */
780 mtspr(SPRN_LPCR, sprs.lpcr);
781 mtspr(SPRN_HFSCR, sprs.hfscr);
782 mtspr(SPRN_FSCR, sprs.fscr);
783 mtspr(SPRN_PID, sprs.pid);
784 mtspr(SPRN_PURR, sprs.purr);
785 mtspr(SPRN_SPURR, sprs.spurr);
786 mtspr(SPRN_DSCR, sprs.dscr);
787 mtspr(SPRN_WORT, sprs.wort);
789 mtspr(SPRN_MMCRA, sprs.mmcra);
790 mtspr(SPRN_MMCR0, sprs.mmcr0);
791 mtspr(SPRN_MMCR1, sprs.mmcr1);
792 mtspr(SPRN_MMCR2, sprs.mmcr2);
793 if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR))
794 mtspr(SPRN_LDBAR, sprs.ldbar);
796 mtspr(SPRN_SPRG3, local_paca->sprg_vdso);
798 if (!radix_enabled())
799 __slb_restore_bolted_realmode();
808 #ifdef CONFIG_HOTPLUG_CPU
809 static unsigned long power9_offline_stop(unsigned long psscr)
813 #ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
814 __ppc64_runlatch_off();
815 srr1 = power9_idle_stop(psscr, true);
816 __ppc64_runlatch_on();
819 * Tell KVM we're entering idle.
820 * This does not have to be done in real mode because the P9 MMU
821 * is independent per-thread. Some steppings share radix/hash mode
822 * between threads, but in that case KVM has a barrier sync in real
823 * mode before and after switching between radix and hash.
825 * kvm_start_guest must still be called in real mode though, hence
826 * the false argument.
828 local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
830 __ppc64_runlatch_off();
831 srr1 = power9_idle_stop(psscr, false);
832 __ppc64_runlatch_on();
834 local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
835 /* Order setting hwthread_state vs. testing hwthread_req */
837 if (local_paca->kvm_hstate.hwthread_req)
838 srr1 = idle_kvm_start_guest(srr1);
846 void power9_idle_type(unsigned long stop_psscr_val,
847 unsigned long stop_psscr_mask)
852 if (!prep_irq_for_idle_irqsoff())
855 psscr = mfspr(SPRN_PSSCR);
856 psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;
858 __ppc64_runlatch_off();
859 srr1 = power9_idle_stop(psscr, true);
860 __ppc64_runlatch_on();
862 fini_irq_for_idle_irqsoff();
864 irq_set_pending_from_srr1(srr1);
868 * Used for ppc_md.power_save which needs a function with no parameters
870 void power9_idle(void)
872 power9_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
875 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
877 * This is used in working around bugs in thread reconfiguration
878 * on POWER9 (at least up to Nimbus DD2.2) relating to transactional
879 * memory and the way that XER[SO] is checkpointed.
* This function forces the core into SMT4 by asking
881 * all other threads not to stop, and sending a message to any
882 * that are in a stop state.
883 * Must be called with preemption disabled.
885 void pnv_power9_force_smt4_catch(void)
888 int awake_threads = 1; /* this thread is awake */
889 int poke_threads = 0;
890 int need_awake = threads_per_core;
892 cpu = smp_processor_id();
893 cpu0 = cpu & ~(threads_per_core - 1);
894 for (thr = 0; thr < threads_per_core; ++thr) {
895 if (cpu != cpu0 + thr)
896 atomic_inc(&paca_ptrs[cpu0+thr]->dont_stop);
898 /* order setting dont_stop vs testing requested_psscr */
900 for (thr = 0; thr < threads_per_core; ++thr) {
901 if (!paca_ptrs[cpu0+thr]->requested_psscr)
904 poke_threads |= (1 << thr);
907 /* If at least 3 threads are awake, the core is in SMT4 already */
908 if (awake_threads < need_awake) {
909 /* We have to wake some threads; we'll use msgsnd */
910 for (thr = 0; thr < threads_per_core; ++thr) {
911 if (poke_threads & (1 << thr)) {
913 ppc_msgsnd(PPC_DBELL_MSGTYPE, 0,
914 paca_ptrs[cpu0+thr]->hw_cpu_id);
917 /* now spin until at least 3 threads are awake */
919 for (thr = 0; thr < threads_per_core; ++thr) {
920 if ((poke_threads & (1 << thr)) &&
921 !paca_ptrs[cpu0+thr]->requested_psscr) {
923 poke_threads &= ~(1 << thr);
926 } while (awake_threads < need_awake);
929 EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_catch);
931 void pnv_power9_force_smt4_release(void)
935 cpu = smp_processor_id();
936 cpu0 = cpu & ~(threads_per_core - 1);
938 /* clear all the dont_stop flags */
939 for (thr = 0; thr < threads_per_core; ++thr) {
940 if (cpu != cpu0 + thr)
941 atomic_dec(&paca_ptrs[cpu0+thr]->dont_stop);
944 EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
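/*
 * _catch() and _release() above are meant to be used as a matched pair by
 * the KVM code that works around the XER[SO] checkpointing bug: catch
 * forces every thread of the core awake and holds it there via dont_stop,
 * release drops those holds so the threads may enter stop again.
 */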
945 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
947 #ifdef CONFIG_HOTPLUG_CPU
949 void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
951 u64 pir = get_hard_smp_processor_id(cpu);
953 mtspr(SPRN_LPCR, lpcr_val);
956 * Program the LPCR via stop-api only if the deepest stop state
957 * can lose hypervisor context.
959 if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
960 opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
* pnv_cpu_offline: A function that puts the CPU into the deepest
* available platform idle state on a CPU-Offline. Must be called with
* interrupts hard disabled and no lazy irq pending.
968 unsigned long pnv_cpu_offline(unsigned int cpu)
972 __ppc64_runlatch_off();
974 if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
977 psscr = mfspr(SPRN_PSSCR);
978 psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
979 pnv_deepest_stop_psscr_val;
980 srr1 = power9_offline_stop(psscr);
981 } else if (cpu_has_feature(CPU_FTR_ARCH_206) && power7_offline_type) {
982 srr1 = power7_offline();
984 /* This is the fallback method. We emulate snooze */
985 while (!generic_check_cpu_restart(cpu)) {
993 __ppc64_runlatch_on();
1000 * Power ISA 3.0 idle initialization.
1002 * POWER ISA 3.0 defines a new SPR Processor stop Status and Control
1003 * Register (PSSCR) to control idle behavior.
1006 * ----------------------------------------------------------
1007 * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
1008 * ----------------------------------------------------------
1009 * 0 4 41 42 43 44 48 54 56 60
1012 * Bits 0:3 - Power-Saving Level Status (PLS). This field indicates the
* lowest power-saving state the thread entered since the stop instruction was executed.
1016 * Bit 41 - Status Disable(SD)
1017 * 0 - Shows PLS entries
1018 * 1 - PLS entries are all 0
1020 * Bit 42 - Enable State Loss
1021 * 0 - No state is lost irrespective of other fields
1022 * 1 - Allows state loss
1024 * Bit 43 - Exit Criterion
1025 * 0 - Exit from power-save mode on any interrupt
1026 * 1 - Exit from power-save mode controlled by LPCR's PECE bits
1028 * Bits 44:47 - Power-Saving Level Limit
1029 * This limits the power-saving level that can be entered into.
1031 * Bits 60:63 - Requested Level
* Used to specify which power-saving level must be entered on executing the stop instruction.
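/*
 * Purely illustrative sketch (not used anywhere below): composing a PSSCR
 * request for stop level "rl" with state loss permitted, following the
 * layout documented above. The values actually used come from firmware via
 * the device tree and are only validated here.
 */
static inline u64 psscr_example_request(u64 rl)
{
	return PSSCR_EC | PSSCR_ESL | (rl & PSSCR_RL_MASK);
}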
1036 int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
1041 * psscr_mask == 0xf indicates an older firmware.
1042 * Set remaining fields of psscr to the default values.
1043 * See NOTE above definition of PSSCR_HV_DEFAULT_VAL
1045 if (*psscr_mask == 0xf) {
1046 *psscr_val = *psscr_val | PSSCR_HV_DEFAULT_VAL;
1047 *psscr_mask = PSSCR_HV_DEFAULT_MASK;
1052 * New firmware is expected to set the psscr_val bits correctly.
* Validate that the following invariants are correctly maintained by the firmware:
1055 * - ESL bit value matches the EC bit value.
1056 * - ESL bit is set for all the deep stop states.
1058 if (GET_PSSCR_ESL(*psscr_val) != GET_PSSCR_EC(*psscr_val)) {
1059 err = ERR_EC_ESL_MISMATCH;
1060 } else if ((flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
1061 GET_PSSCR_ESL(*psscr_val) == 0) {
1062 err = ERR_DEEP_STATE_ESL_MISMATCH;
* pnv_power9_idle_init: Initializes the default idle state, the first
* SPR/timebase loss levels and the deepest idle state on ISA 3.0 CPUs,
* based on the idle states already parsed from the device tree into
* pnv_idle_states[].
1078 static void __init pnv_power9_idle_init(void)
1080 u64 max_residency_ns = 0;
1084 * pnv_deepest_stop_{val,mask} should be set to values corresponding to
1085 * the deepest stop state.
1087 * pnv_default_stop_{val,mask} should be set to values corresponding to
1088 * the deepest loss-less (OPAL_PM_STOP_INST_FAST) stop state.
1090 pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
1091 pnv_first_spr_loss_level = MAX_STOP_STATE + 1;
1092 for (i = 0; i < nr_pnv_idle_states; i++) {
1094 struct pnv_idle_states_t *state = &pnv_idle_states[i];
1095 u64 psscr_rl = state->psscr_val & PSSCR_RL_MASK;
1097 if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
1098 (pnv_first_tb_loss_level > psscr_rl))
1099 pnv_first_tb_loss_level = psscr_rl;
1101 if ((state->flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
1102 (pnv_first_spr_loss_level > psscr_rl))
1103 pnv_first_spr_loss_level = psscr_rl;
1106 * The idle code does not deal with TB loss occurring
1107 * in a shallower state than SPR loss, so force it to
1108 * behave like SPRs are lost if TB is lost. POWER9 would
* never encounter this, but a POWER8 core would if it
* implemented the stop instruction. So this is for forward compatibility.
1113 if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
1114 (pnv_first_spr_loss_level > psscr_rl))
1115 pnv_first_spr_loss_level = psscr_rl;
err = validate_psscr_val_mask(&state->psscr_val, &state->psscr_mask, state->flags);
1121 report_invalid_psscr_val(state->psscr_val, err);
1125 state->valid = true;
1127 if (max_residency_ns < state->residency_ns) {
1128 max_residency_ns = state->residency_ns;
1129 pnv_deepest_stop_psscr_val = state->psscr_val;
1130 pnv_deepest_stop_psscr_mask = state->psscr_mask;
1131 pnv_deepest_stop_flag = state->flags;
1132 deepest_stop_found = true;
1135 if (!default_stop_found &&
1136 (state->flags & OPAL_PM_STOP_INST_FAST)) {
1137 pnv_default_stop_val = state->psscr_val;
1138 pnv_default_stop_mask = state->psscr_mask;
1139 default_stop_found = true;
1140 WARN_ON(state->flags & OPAL_PM_LOSE_FULL_CONTEXT);
1144 if (unlikely(!default_stop_found)) {
1145 pr_warn("cpuidle-powernv: No suitable default stop state found. Disabling platform idle.\n");
1147 ppc_md.power_save = power9_idle;
1148 pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx,mask=0x%016llx\n",
1149 pnv_default_stop_val, pnv_default_stop_mask);
1152 if (unlikely(!deepest_stop_found)) {
pr_warn("cpuidle-powernv: No suitable stop state for CPU-Hotplug. Offlined CPUs will busy wait\n");
1155 pr_info("cpuidle-powernv: Deepest stop: psscr = 0x%016llx,mask=0x%016llx\n",
1156 pnv_deepest_stop_psscr_val,
1157 pnv_deepest_stop_psscr_mask);
1160 pr_info("cpuidle-powernv: First stop level that may lose SPRs = 0x%llx\n",
1161 pnv_first_spr_loss_level);
1163 pr_info("cpuidle-powernv: First stop level that may lose timebase = 0x%llx\n",
1164 pnv_first_tb_loss_level);
1167 static void __init pnv_disable_deep_states(void)
1170 * The stop-api is unable to restore hypervisor
1171 * resources on wakeup from platform idle states which
1172 * lose full context. So disable such states.
1174 supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
1175 pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
1176 pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");
1178 if (cpu_has_feature(CPU_FTR_ARCH_300) &&
1179 (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
1181 * Use the default stop state for CPU-Hotplug
1184 if (default_stop_found) {
1185 pnv_deepest_stop_psscr_val = pnv_default_stop_val;
1186 pnv_deepest_stop_psscr_mask = pnv_default_stop_mask;
1187 pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
1188 pnv_deepest_stop_psscr_val);
1189 } else { /* Fallback to snooze loop for CPU-Hotplug */
1190 deepest_stop_found = false;
1191 pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
1197 * Probe device tree for supported idle states
1199 static void __init pnv_probe_idle_states(void)
1203 if (nr_pnv_idle_states < 0) {
1204 pr_warn("cpuidle-powernv: no idle states found in the DT\n");
1208 if (cpu_has_feature(CPU_FTR_ARCH_300))
1209 pnv_power9_idle_init();
1211 for (i = 0; i < nr_pnv_idle_states; i++)
1212 supported_cpuidle_states |= pnv_idle_states[i].flags;
* This function parses the device tree and populates all the information
* into the pnv_idle_states structure. It also sets nr_pnv_idle_states
* to the number of cpuidle states discovered through the device tree.
1221 static int pnv_parse_cpuidle_dt(void)
1223 struct device_node *np;
1224 int nr_idle_states, i;
1228 const char **temp_string;
1230 np = of_find_node_by_path("/ibm,opal/power-mgt");
1232 pr_warn("opal: PowerMgmt Node not found\n");
1235 nr_idle_states = of_property_count_u32_elems(np,
1236 "ibm,cpu-idle-state-flags");
pnv_idle_states = kcalloc(nr_idle_states, sizeof(*pnv_idle_states), GFP_KERNEL);
1240 temp_u32 = kcalloc(nr_idle_states, sizeof(u32), GFP_KERNEL);
1241 temp_u64 = kcalloc(nr_idle_states, sizeof(u64), GFP_KERNEL);
1242 temp_string = kcalloc(nr_idle_states, sizeof(char *), GFP_KERNEL);
1244 if (!(pnv_idle_states && temp_u32 && temp_u64 && temp_string)) {
1245 pr_err("Could not allocate memory for dt parsing\n");
1251 if (of_property_read_u32_array(np, "ibm,cpu-idle-state-flags",
1252 temp_u32, nr_idle_states)) {
1253 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
1257 for (i = 0; i < nr_idle_states; i++)
1258 pnv_idle_states[i].flags = temp_u32[i];
1260 /* Read latencies */
1261 if (of_property_read_u32_array(np, "ibm,cpu-idle-state-latencies-ns",
1262 temp_u32, nr_idle_states)) {
1263 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
1267 for (i = 0; i < nr_idle_states; i++)
1268 pnv_idle_states[i].latency_ns = temp_u32[i];
1270 /* Read residencies */
1271 if (of_property_read_u32_array(np, "ibm,cpu-idle-state-residency-ns",
1272 temp_u32, nr_idle_states)) {
pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n");
1277 for (i = 0; i < nr_idle_states; i++)
1278 pnv_idle_states[i].residency_ns = temp_u32[i];
1281 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1282 /* Read pm_crtl_val */
1283 if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr",
1284 temp_u64, nr_idle_states)) {
1285 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
1289 for (i = 0; i < nr_idle_states; i++)
1290 pnv_idle_states[i].psscr_val = temp_u64[i];
1292 /* Read pm_crtl_mask */
1293 if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr-mask",
1294 temp_u64, nr_idle_states)) {
1295 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n");
1299 for (i = 0; i < nr_idle_states; i++)
1300 pnv_idle_states[i].psscr_mask = temp_u64[i];
1304 * power8 specific properties ibm,cpu-idle-state-pmicr-mask and
1305 * ibm,cpu-idle-state-pmicr-val were never used and there is no
* plan to use them in the near future; hence these properties are not parsed.
1309 if (of_property_read_string_array(np, "ibm,cpu-idle-state-names",
1310 temp_string, nr_idle_states) < 0) {
1311 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
1315 for (i = 0; i < nr_idle_states; i++)
strlcpy(pnv_idle_states[i].name, temp_string[i], PNV_IDLE_NAME_LEN);
1318 nr_pnv_idle_states = nr_idle_states;
1327 static int __init pnv_init_idle_states(void)
1332 /* Set up PACA fields */
1333 for_each_present_cpu(cpu) {
1334 struct paca_struct *p = paca_ptrs[cpu];
1337 if (cpu == cpu_first_thread_sibling(cpu))
1338 p->idle_state = (1 << threads_per_core) - 1;
1340 if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
1342 p->thread_idle_state = PNV_THREAD_RUNNING;
1345 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1346 p->requested_psscr = 0;
1347 atomic_set(&p->dont_stop, 0);
1352 /* In case we error out nr_pnv_idle_states will be zero */
1353 nr_pnv_idle_states = 0;
1354 supported_cpuidle_states = 0;
1356 if (cpuidle_disable != IDLE_NO_OVERRIDE)
1358 rc = pnv_parse_cpuidle_dt();
1361 pnv_probe_idle_states();
1363 if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
1364 if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
1365 power7_fastsleep_workaround_entry = false;
1366 power7_fastsleep_workaround_exit = false;
1369 * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that
* a workaround is needed to use fastsleep. Provide sysfs
* control to choose how this workaround has to be applied.
1374 device_create_file(cpu_subsys.dev_root,
1375 &dev_attr_fastsleep_workaround_applyonce);
1378 update_subcore_sibling_mask();
1380 if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED) {
1381 ppc_md.power_save = power7_idle;
1382 power7_offline_type = PNV_THREAD_NAP;
1385 if ((supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED) &&
1386 (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT))
1387 power7_offline_type = PNV_THREAD_WINKLE;
1388 else if ((supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED) ||
1389 (supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1))
1390 power7_offline_type = PNV_THREAD_SLEEP;
1393 if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
1394 if (pnv_save_sprs_for_deep_states())
1395 pnv_disable_deep_states();
1401 machine_subsys_initcall(powernv, pnv_init_idle_states);