// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <linux/acpi.h>
#include <linux/elf-randomize.h>
#include <linux/static_call.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <linux/entry-common.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/api.h>
#include <asm/fpu/sched.h>
#include <asm/fpu/xstate.h>
#include <asm/debugreg.h>
#include <asm/tlbflush.h>
#include <asm/switch_to.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
#include <asm/io_bitmap.h>
#include <asm/proto.h>
#include <asm/frame.h>
#include <asm/unwind.h>
#include <asm/mmu_context.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
	.x86_tss = {
		/*
		 * .sp0 is only used when entering ring 0 from a lower
		 * privilege level. Since the init task never runs anything
		 * but ring 0 code, there is no need for a valid value here.
		 * Poison it.
		 */
		.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,

#ifdef CONFIG_X86_32
		.sp1 = TOP_OF_INIT_STACK,
#endif
		.io_bitmap_base	= IO_BITMAP_OFFSET_INVALID,
	},
};
EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);

DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);

#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif
	/* Drop the copied pointer to current's fpstate */
	dst->thread.fpu.fpstate = NULL;

	return 0;
}

#ifdef CONFIG_X86_64
void arch_release_task_struct(struct task_struct *tsk)
{
	if (fpu_state_size_dynamic())
		fpstate_free(&tsk->thread.fpu);
}
#endif

/*
 * Free thread data structures etc..
 */
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	struct fpu *fpu = &t->fpu;

	if (test_thread_flag(TIF_IO_BITMAP))
		io_bitmap_exit(tsk);

	fpu__drop(fpu);
}

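/*
 * Install the TLS passed in via clone() for the new child: a GDT entry for
 * 32-bit callers, the FS base for 64-bit callers.
 */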
static int set_new_tls(struct task_struct *p, unsigned long tls)
{
	struct user_desc __user *utls = (struct user_desc __user *)tls;

	if (in_ia32_syscall())
		return do_set_thread_area(p, -1, utls, 0);
	else
		return do_set_thread_area_64(p, ARCH_SET_FS, tls);
}

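/*
 * Common C tail of fork/clone, reached from ret_from_fork_asm when the new
 * task runs for the first time. @fn is only non-NULL for kernel threads and
 * for user tasks that start life in a kernel function.
 */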
__visible void ret_from_fork(struct task_struct *prev, struct pt_regs *regs,
			     int (*fn)(void *), void *fn_arg)
{
	schedule_tail(prev);

	/* Is this a kernel thread? */
	if (unlikely(fn)) {
		fn(fn_arg);
		/*
		 * A kernel thread is allowed to return here after successfully
		 * calling kernel_execve(). Exit to userspace to complete the
		 * execve() syscall.
		 */
		regs->ax = 0;
	}

	syscall_exit_to_user_mode(regs);
}

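/*
 * Set up the kernel stack frame and thread state of a newly forked task so
 * that it can be context switched to, and later return to user mode.
 */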
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long sp = args->stack;
	unsigned long tls = args->tls;
	struct inactive_task_frame *frame;
	struct fork_frame *fork_frame;
	struct pt_regs *childregs;
	int ret = 0;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;

	frame->bp = encode_frame_pointer(childregs);
	frame->ret_addr = (unsigned long) ret_from_fork_asm;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap = NULL;
	p->thread.iopl_warn = 0;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

#ifdef CONFIG_X86_64
	current_save_fsgs();
	p->thread.fsindex = current->thread.fsindex;
	p->thread.fsbase = current->thread.fsbase;
	p->thread.gsindex = current->thread.gsindex;
	p->thread.gsbase = current->thread.gsbase;

	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	if (p->mm && (clone_flags & (CLONE_VM | CLONE_VFORK)) == CLONE_VM)
		set_bit(MM_CONTEXT_LOCK_LAM, &p->mm->context.flags);
#else
	p->thread.sp0 = (unsigned long) (childregs + 1);
	savesegment(gs, p->thread.gs);
	/*
	 * Clear all status flags including IF and set fixed bit. 64bit
	 * does not have this initialization as the frame does not contain
	 * flags. The flags consistency (especially vs. AC) is there
	 * ensured via objtool, which lacks 32bit support.
	 */
	frame->flags = X86_EFLAGS_FIXED;
#endif

	fpu_clone(p, clone_flags, args->fn);

	/* Kernel thread ? */
	if (unlikely(p->flags & PF_KTHREAD)) {
		p->thread.pkru = pkru_get_init_value();
		memset(childregs, 0, sizeof(struct pt_regs));
		kthread_frame_init(frame, args->fn, args->fn_arg);
		return 0;
	}

	/*
	 * Clone current's PKRU value from hardware. tsk->thread.pkru
	 * is only valid when scheduled out.
	 */
	p->thread.pkru = read_pkru();

	frame->bx = 0;
	*childregs = *current_pt_regs();
	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	if (unlikely(args->fn)) {
		/*
		 * A user space thread, but it doesn't return to
		 * ret_after_fork().
		 *
		 * In order to indicate that to tools like gdb,
		 * we reset the stack and instruction pointers.
		 *
		 * It does the same kernel frame setup to return to a kernel
		 * function that a kernel thread does.
		 */
		childregs->sp = 0;
		childregs->ip = 0;
		kthread_frame_init(frame, args->fn, args->fn_arg);
		return 0;
	}

	/* Set a new TLS for the child thread? */
	if (clone_flags & CLONE_SETTLS)
		ret = set_new_tls(p, tls);

	if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
		io_bitmap_share(p);

	return ret;
}

static void pkru_flush_thread(void)
{
	/*
	 * If PKRU is enabled the default PKRU value has to be loaded into
	 * the hardware right here (similar to context switch).
	 */
	pkru_write_default();
}

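/*
 * Called on exec: reset per-thread hardware state (breakpoints, TLS, FPU,
 * PKRU) so that nothing leaks from the old program into the new one.
 */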
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu_flush_thread();
	pkru_flush_thread();
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_set_bits(X86_CR4_TSD);
	preempt_enable();
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_clear_bits(X86_CR4_TSD);
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

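/*
 * For reference, these two helpers back the PR_GET_TSC/PR_SET_TSC prctl()s.
 * A rough userspace sketch (not part of this file):
 *
 *	#include <sys/prctl.h>
 *
 *	int mode;
 *	prctl(PR_GET_TSC, &mode);		// read the current mode
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	// make RDTSC fault with SIGSEGV
 *	prctl(PR_SET_TSC, PR_TSC_ENABLE);	// allow RDTSC again
 */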
DEFINE_PER_CPU(u64, msr_misc_features_shadow);

static void set_cpuid_faulting(bool on)
{
	u64 msrval;

	msrval = this_cpu_read(msr_misc_features_shadow);
	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
	this_cpu_write(msr_misc_features_shadow, msrval);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
}

static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(true);
	}
	preempt_enable();
}

static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(false);
	}
	preempt_enable();
}

static int get_cpuid_mode(void)
{
	return !test_thread_flag(TIF_NOCPUID);
}

static int set_cpuid_mode(unsigned long cpuid_enabled)
{
	if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
		return -ENODEV;

	if (cpuid_enabled)
		enable_cpuid();
	else
		disable_cpuid();

	return 0;
}

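/*
 * For reference, CPUID faulting is toggled per task from userspace via
 * arch_prctl(). A rough sketch (not part of this file):
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_CPUID, 0);	// CPUID now raises SIGSEGV
 *	syscall(SYS_arch_prctl, ARCH_GET_CPUID, 0);	// returns 1 if enabled
 */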
/*
 * Called immediately after a successful exec.
 */
void arch_setup_new_exec(void)
{
	/* If cpuid was previously disabled for this task, re-enable it. */
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();

	/*
	 * Don't inherit TIF_SSBD across exec boundary when
	 * PR_SPEC_DISABLE_NOEXEC is used.
	 */
	if (test_thread_flag(TIF_SSBD) &&
	    task_spec_ssb_noexec(current)) {
		clear_thread_flag(TIF_SSBD);
		task_clear_spec_ssb_disable(current);
		task_clear_spec_ssb_noexec(current);
		speculation_ctrl_update(read_thread_flags());
	}

	mm_reset_untag_mask(current->mm);
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void switch_to_bitmap(unsigned long tifp)
{
	/*
	 * Invalidate I/O bitmap if the previous task used it. This prevents
	 * any possible leakage of an active I/O bitmap.
	 *
	 * If the next task has an I/O bitmap it will handle it on exit to
	 * user mode.
	 */
	if (tifp & _TIF_IO_BITMAP)
		tss_invalidate_io_bitmap();
}

static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
{
	/*
	 * Copy at least the byte range of the incoming task's bitmap which
	 * covers the permitted I/O ports.
	 *
	 * If the previous task which used an I/O bitmap had more bits
	 * permitted, then the copy needs to cover those as well so they
	 * get turned off.
	 */
	memcpy(tss->io_bitmap.bitmap, iobm->bitmap,
	       max(tss->io_bitmap.prev_max, iobm->max));

	/*
	 * Store the new max and the sequence number of this bitmap
	 * and a pointer to the bitmap itself.
	 */
	tss->io_bitmap.prev_max = iobm->max;
	tss->io_bitmap.prev_sequence = iobm->sequence;
}

/**
 * native_tss_update_io_bitmap - Update I/O bitmap before exiting to user mode
 */
void native_tss_update_io_bitmap(void)
{
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
	struct thread_struct *t = &current->thread;
	u16 *base = &tss->x86_tss.io_bitmap_base;

	if (!test_thread_flag(TIF_IO_BITMAP)) {
		native_tss_invalidate_io_bitmap();
		return;
	}

	if (IS_ENABLED(CONFIG_X86_IOPL_IOPERM) && t->iopl_emul == 3) {
		*base = IO_BITMAP_OFFSET_VALID_ALL;
	} else {
		struct io_bitmap *iobm = t->io_bitmap;

		/*
		 * Only copy bitmap data when the sequence number differs. The
		 * update time is accounted to the incoming task.
		 */
		if (tss->io_bitmap.prev_sequence != iobm->sequence)
			tss_copy_io_bitmap(tss, iobm);

		/* Enable the bitmap */
		*base = IO_BITMAP_OFFSET_VALID_MAP;
	}

	/*
	 * Make sure that the TSS limit is covering the I/O bitmap. It might
	 * have been cut down by a VMEXIT to 0x67 which would cause a
	 * subsequent I/O access from user space to trigger a #GP because the
	 * bitmap is outside the TSS limit.
	 */
	refresh_tss_limit();
}
#else /* CONFIG_X86_IOPL_IOPERM */
static inline void switch_to_bitmap(unsigned long tifp) { }
#endif

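/*
 * Per-CPU state used below to coordinate the LS_CFG SSBD MSR between HT
 * siblings: both siblings point at one shared_state and refcount the number
 * of threads that currently want SSBD enabled via disable_state.
 */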
#ifdef CONFIG_SMP

struct ssb_state {
	struct ssb_state	*shared_state;
	raw_spinlock_t		lock;
	unsigned int		disable_state;
	unsigned long		local_state;
};

#define LSTATE_SSB	0

static DEFINE_PER_CPU(struct ssb_state, ssb_state);

void speculative_store_bypass_ht_init(void)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	st->local_state = 0;

	/*
	 * Shared state setup happens once on the first bringup
	 * of the CPU. It's not destroyed on CPU hotunplug.
	 */
	if (st->shared_state)
		return;

	raw_spin_lock_init(&st->lock);

	/*
	 * Go over HT siblings and check whether one of them has set up the
	 * shared state pointer already.
	 */
	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
		if (cpu == this_cpu)
			continue;

		if (!per_cpu(ssb_state, cpu).shared_state)
			continue;

		/* Link it to the state of the sibling: */
		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
		return;
	}

	/*
	 * First HT sibling to come up on the core. Link shared state of
	 * the first HT sibling to itself. The siblings on the same core
	 * which come up later will see the shared state pointer and link
	 * themselves to the state of this CPU.
	 */
	st->shared_state = st;
}

/*
 * Logic is: First HT sibling enables SSBD for both siblings in the core
 * and last sibling to disable it, disables it for the whole core. This is
 * how MSR_SPEC_CTRL works in "hardware":
 *
 *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
 */
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	u64 msr = x86_amd_ls_cfg_base;

	if (!static_cpu_has(X86_FEATURE_ZEN)) {
		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
		wrmsrl(MSR_AMD64_LS_CFG, msr);
		return;
	}

	if (tifn & _TIF_SSBD) {
		/*
		 * Since this can race with prctl(), block reentry on the
		 * same CPU.
		 */
		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
			return;

		msr |= x86_amd_ls_cfg_ssbd_mask;

		raw_spin_lock(&st->shared_state->lock);
		/* First sibling enables SSBD: */
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		st->shared_state->disable_state++;
		raw_spin_unlock(&st->shared_state->lock);
	} else {
		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
			return;

		raw_spin_lock(&st->shared_state->lock);
		st->shared_state->disable_state--;
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		raw_spin_unlock(&st->shared_state->lock);
	}
}
#else
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

	wrmsrl(MSR_AMD64_LS_CFG, msr);
}
#endif

static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{
	/*
	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
	 * so ssbd_tif_to_spec_ctrl() just works.
	 */
	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}

/*
 * Update the MSRs managing speculation control, during context switch.
 *
 * tifp: Previous task's thread flags
 * tifn: Next task's thread flags
 */
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
						      unsigned long tifn)
{
	unsigned long tif_diff = tifp ^ tifn;
	u64 msr = x86_spec_ctrl_base;
	bool updmsr = false;

	lockdep_assert_irqs_disabled();

	/* Handle change of TIF_SSBD depending on the mitigation method. */
	if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_ssb_virt_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_core_ssb_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		updmsr |= !!(tif_diff & _TIF_SSBD);
		msr |= ssbd_tif_to_spec_ctrl(tifn);
	}

	/* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
	if (IS_ENABLED(CONFIG_SMP) &&
	    static_branch_unlikely(&switch_to_cond_stibp)) {
		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
		msr |= stibp_tif_to_spec_ctrl(tifn);
	}

	if (updmsr)
		update_spec_ctrl_cond(msr);
}

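/*
 * Re-evaluate TIF_SSBD and TIF_SPEC_IB from the task's prctl/seccomp
 * controlled speculation state when a forced update was requested, and
 * return the resulting thread flags.
 */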
static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{
	if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
		if (task_spec_ssb_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SSBD);
		else
			clear_tsk_thread_flag(tsk, TIF_SSBD);

		if (task_spec_ib_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SPEC_IB);
		else
			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
	}
	/* Return the updated threadinfo flags */
	return read_task_thread_flags(tsk);
}

void speculation_ctrl_update(unsigned long tif)
{
	unsigned long flags;

	/* Forced update. Make sure all relevant TIF flags are different */
	local_irq_save(flags);
	__speculation_ctrl_update(~tif, tif);
	local_irq_restore(flags);
}

/* Called from seccomp/prctl update */
void speculation_ctrl_update_current(void)
{
	preempt_disable();
	speculation_ctrl_update(speculation_ctrl_update_tif(current));
	preempt_enable();
}

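/*
 * Toggle the given bits in CR4 and keep the per-CPU shadow copy in
 * cpu_tlbstate in sync. The caller must have interrupts disabled.
 */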
static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{
	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);

	newval = cr4 ^ mask;
	if (newval != cr4) {
		this_cpu_write(cpu_tlbstate.cr4, newval);
		__write_cr4(newval);
	}
}

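/*
 * Slow path of the context switch: handles the less common per-task state,
 * i.e. I/O bitmap invalidation, user-return notifiers, blockstep (BTF),
 * TSC/CPUID faulting and the speculation control MSRs.
 */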
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
	unsigned long tifp, tifn;

	tifn = read_task_thread_flags(next_p);
	tifp = read_task_thread_flags(prev_p);

	switch_to_bitmap(tifp);

	propagate_user_return_notify(prev_p, next_p);

	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
	    arch_has_block_step()) {
		unsigned long debugctl, msk;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl &= ~DEBUGCTLMSR_BTF;
		msk = tifn & _TIF_BLOCKSTEP;
		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	if ((tifp ^ tifn) & _TIF_NOTSC)
		cr4_toggle_bits_irqsoff(X86_CR4_TSD);

	if ((tifp ^ tifn) & _TIF_NOCPUID)
		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

	if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
		__speculation_ctrl_update(tifp, tifn);
	} else {
		speculation_ctrl_update_tif(prev_p);
		tifn = speculation_ctrl_update_tif(next_p);

		/* Enforce MSR update to ensure consistent state */
		__speculation_ctrl_update(~tifn, tifn);
	}
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * We use this if we don't have any better idle routine..
 */
void __cpuidle default_idle(void)
{
	raw_safe_halt();
	raw_local_irq_disable();
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
#endif

DEFINE_STATIC_CALL_NULL(x86_idle, default_idle);

static bool x86_idle_set(void)
{
	return !!static_call_query(x86_idle);
}

#ifndef CONFIG_SMP
static inline void __noreturn play_dead(void)
{
	BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust(false);
	local_touch_nmi();
}

void __noreturn arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void __cpuidle arch_cpu_idle(void)
{
	static_call(x86_idle)();
}
EXPORT_SYMBOL_GPL(arch_cpu_idle);

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = x86_idle_set();

	static_call_update(x86_idle, default_idle);

	return ret;
}
#endif

struct cpumask cpus_stop_mask;

void __noreturn stop_this_cpu(void *dummy)
{
	struct cpuinfo_x86 *c = this_cpu_ptr(&cpu_info);
	unsigned int cpu = smp_processor_id();

	local_irq_disable();

	/*
	 * Remove this CPU from the online mask and disable it
	 * unconditionally. This might be redundant in case that the reboot
	 * vector was handled late and stop_other_cpus() sent an NMI.
	 *
	 * According to SDM and APM NMIs can be accepted even after soft
	 * disabling the local APIC.
	 */
	set_cpu_online(cpu, false);
	disable_local_APIC();
	mcheck_cpu_clear(c);

	/*
	 * Use wbinvd on processors that support SME. This provides support
	 * for performing a successful kexec when going from SME inactive
	 * to SME active (or vice-versa). The cache must be cleared so that
	 * if there are entries with the same physical address, both with and
	 * without the encryption bit, they don't race each other when flushed
	 * and potentially end up with the wrong entry being committed to
	 * memory.
	 *
	 * Test the CPUID bit directly because the machine might've cleared
	 * X86_FEATURE_SME due to cmdline options.
	 */
	if (c->extended_cpuid_level >= 0x8000001f && (cpuid_eax(0x8000001f) & BIT(0)))
		native_wbinvd();

	/*
	 * This brings a cache line back and dirties it, but
	 * native_stop_other_cpus() will overwrite cpus_stop_mask after it
	 * observed that all CPUs reported stop. This write will invalidate
	 * the related cache line on this CPU.
	 */
	cpumask_clear_cpu(cpu, &cpus_stop_mask);

	for (;;) {
		/*
		 * Use native_halt() so that memory contents don't change
		 * (stack usage and variables) after possibly issuing the
		 * native_wbinvd() above.
		 */
		native_halt();
	}
}

/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 *
 * XXX this function is completely buggered vs RCU and tracing.
 */
static void amd_e400_idle(void)
{
	/*
	 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
	 * gets set after static_cpu_has() places have been converted via
	 * alternatives.
	 */
	if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		default_idle();
		return;
	}

	tick_broadcast_enter();

	default_idle();

	tick_broadcast_exit();
}

/*
 * Prefer MWAIT over HALT if MWAIT is supported, MWAIT_CPUID leaf
 * exists and whenever MONITOR/MWAIT extensions are present there is at
 * least one C1 substate.
 *
 * Do not prefer MWAIT if MONITOR instruction has a bug or idle=nomwait
 * is passed on the kernel command line.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* User has disallowed the use of MWAIT. Fallback to HALT */
	if (boot_option_idle_override == IDLE_NOMWAIT)
		return 0;

	/* MWAIT is not supported on this platform. Fallback to HALT */
	if (!cpu_has(c, X86_FEATURE_MWAIT))
		return 0;

	/* Monitor has a bug. Fallback to HALT */
	if (boot_cpu_has_bug(X86_BUG_MONITOR))
		return 0;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	/*
	 * If MWAIT extensions are not available, it is safe to use MWAIT
	 * with EAX=0, ECX=0.
	 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED))
		return 1;

	/*
	 * If MWAIT extensions are available, there should be at least one
	 * MWAIT C1 substate present.
	 */
	return (edx & MWAIT_C1_SUBSTATE_MASK);
}

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched()) {
			__sti_mwait(0, 0);
			raw_local_irq_disable();
		}
	}
	__current_clr_polling();
}

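/*
 * Pick the idle routine at boot time, in this order of preference:
 * AMD E400 workaround, MWAIT-based C1, TDX-safe halt, plain HLT.
 */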
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle_set() || boot_option_idle_override == IDLE_POLL)
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
		pr_info("using AMD E400 aware idle routine\n");
		static_call_update(x86_idle, amd_e400_idle);
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		static_call_update(x86_idle, mwait_idle);
	} else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
		pr_info("using TDX aware idle routine\n");
		static_call_update(x86_idle, tdx_safe_halt);
	} else {
		static_call_update(x86_idle, default_idle);
	}
}

void amd_e400_c1e_apic_setup(void)
{
	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
		local_irq_disable();
		tick_broadcast_force();
		local_irq_enable();
	}
}

void __init arch_post_acpi_subsys_init(void)
{
	u32 lo, hi;

	if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
		return;

	/*
	 * AMD E400 detection needs to happen after ACPI has been enabled. If
	 * the machine is affected K8_INTP_C1E_ACTIVE_MASK bits are set in
	 * MSR_K8_INT_PENDING_MSG.
	 */
	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
	if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
		return;

	boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halt in AMD C1E");
	pr_info("System has AMD C1E enabled\n");
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		static_call_update(x86_idle, default_idle);
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C1/C2/C3
		 * states.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else {
		return -1;
	}

	return 0;
}
early_param("idle", idle_setup);

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(8192);
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_page(mm->brk, 0x02000000);
}

/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long __get_wchan(struct task_struct *p)
{
	struct unwind_state state;
	unsigned long addr = 0;

	if (!try_get_task_stack(p))
		return 0;

	for (unwind_start(&state, p, NULL, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr)
			break;
		if (in_sched_functions(addr))
			continue;
		break;
	}

	put_task_stack(p);

	return addr;
}

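/*
 * Handle the arch_prctl() options that are common to 32-bit and 64-bit
 * tasks: CPUID faulting control and the dynamic XSTATE permission calls.
 */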
long do_arch_prctl_common(int option, unsigned long arg2)
{
	switch (option) {
	case ARCH_GET_CPUID:
		return get_cpuid_mode();
	case ARCH_SET_CPUID:
		return set_cpuid_mode(arg2);
	case ARCH_GET_XCOMP_SUPP:
	case ARCH_GET_XCOMP_PERM:
	case ARCH_REQ_XCOMP_PERM:
	case ARCH_GET_XCOMP_GUEST_PERM:
	case ARCH_REQ_XCOMP_GUEST_PERM:
		return fpu_xstate_prctl(option, arg2);
	}

	return -EINVAL;
}
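
/*
 * For reference, the XSTATE permission options above are how userspace
 * requests access to dynamically enabled features such as AMX. A rough
 * sketch (not part of this file):
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *
 *	// XFEATURE_XTILEDATA is state component 18; request it before use
 *	syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, 18);
 */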