// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/traps.h>
#include <asm/irq_regs.h>

#include <linux/hardirq.h>
#include <linux/pkeys.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>

/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __read_mostly;

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

static bool interrupted_kernel_fpu_idle(void)
{
	return !kernel_fpu_disabled();
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();
	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (i.e. "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

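/*
 * Usage sketch (illustrative, not part of this file): code that wants
 * to use SIMD in kernel mode typically guards the FPU section with
 * this check and falls back to a scalar path otherwise:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		<SSE/AVX code>
 *		kernel_fpu_end();
 *	} else {
 *		<scalar fallback>
 *	}
 */
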
/*
 * Save the FPU register state in fpu->state. The register state is
 * preserved.
 *
 * Must be called with fpregs_lock() held.
 *
 * The legacy FNSAVE instruction clears all FPU state unconditionally, so
 * register state has to be reloaded. That might be a pointless exercise
 * when the FPU is going to be used by another task right after that. But
 * this only affects 20+ year old 32-bit systems and avoids conditionals all
 * over the place.
 *
 * FXSAVE and all XSAVE variants preserve the FPU register state.
 */
void save_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		os_xsave(&fpu->state.xsave);

		/*
		 * AVX512 state is tracked here because its use is
		 * known to slow the max clock speed of the core.
		 */
		if (fpu->state.xsave.header.xfeatures & XFEATURE_MASK_AVX512)
			fpu->avx512_timestamp = jiffies;
		return;
	}

	if (likely(use_fxsr())) {
		fxsave(&fpu->state.fxsave);
		return;
	}

	/*
	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
	 * so we have to reload them from the memory state.
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));
	frstor(&fpu->state.fsave);
}
EXPORT_SYMBOL(save_fpregs_to_fpstate);

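/*
 * Usage sketch (illustrative): per the comment above, callers bracket
 * this with the fpregs lock, as fpu__save() below does:
 *
 *	fpregs_lock();
 *	save_fpregs_to_fpstate(&current->thread.fpu);
 *	fpregs_unlock();
 */
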
void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
	preempt_disable();

	WARN_ON_FPU(!irq_fpu_usable());
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, true);

	if (!(current->flags & PF_KTHREAD) &&
	    !test_thread_flag(TIF_NEED_FPU_LOAD)) {
		set_thread_flag(TIF_NEED_FPU_LOAD);
		save_fpregs_to_fpstate(&current->thread.fpu);
	}
	__cpu_invalidate_fpregs_state();

	/* Put sane initial values into the control registers. */
	if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
		ldmxcsr(MXCSR_DEFAULT);

	if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
		asm volatile ("fninit");
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);

void kernel_fpu_end(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, false);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);

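/*
 * Note (sketch; the wrapper lives in a header, not in this file): most
 * callers use plain kernel_fpu_begin(), which passes KFPU_MXCSR on
 * 64-bit kernels (kernel code there does not use 387 state) and
 * KFPU_387 | KFPU_MXCSR on 32-bit, e.g.:
 *
 *	kernel_fpu_begin_mask(KFPU_MXCSR);
 *	<SSE code that assumes a sane MXCSR>
 *	kernel_fpu_end();
 */
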
/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	fpregs_lock();
	trace_x86_fpu_before_save(fpu);

	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
		save_fpregs_to_fpstate(fpu);

	trace_x86_fpu_after_save(fpu);
	fpregs_unlock();
}

static inline void fpstate_init_xstate(struct xregs_state *xsave)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask_all;
}

static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}

/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
	fp->cwd = 0xffff037fu;
	fp->swd = 0xffff0000u;
	fp->twd = 0xffffffffu;
	fp->fos = 0xffff0000u;
}

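/*
 * A note on the magic values above (sketch; see the SDM for the
 * authoritative layout): 0x037f is the control word FNINIT would set,
 * i.e. all exceptions masked, extended precision, round-to-nearest; a
 * tag word of all ones marks every register empty. The upper 16 bits
 * of these 32-bit fields are reserved in the legacy FSAVE image and
 * are filled with ones here.
 */
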
void fpstate_init(union fpregs_state *state)
{
	if (!static_cpu_has(X86_FEATURE_FPU)) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, fpu_kernel_xstate_size);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		fpstate_init_xstate(&state->xsave);
	if (static_cpu_has(X86_FEATURE_FXSR))
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);

int fpu__copy(struct task_struct *dst, struct task_struct *src)
{
	struct fpu *dst_fpu = &dst->thread.fpu;
	struct fpu *src_fpu = &src->thread.fpu;

	dst_fpu->last_cpu = -1;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return 0;

	WARN_ON_FPU(src_fpu != &current->thread.fpu);

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);

	/*
	 * If the FPU registers are not owned by current just memcpy() the
	 * state. Otherwise save the FPU registers directly into the
	 * child's FPU context, without any memory-to-memory copying.
	 */
	fpregs_lock();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);
	else
		save_fpregs_to_fpstate(dst_fpu);
	fpregs_unlock();

	set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);

	trace_x86_fpu_copy_src(src_fpu);
	trace_x86_fpu_copy_dst(dst_fpu);

	return 0;
}

/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
static void fpu__initialize(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	set_thread_flag(TIF_NEED_FPU_LOAD);
	fpstate_init(&fpu->state);
	trace_x86_fpu_init_state(fpu);
}

/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();

	if (fpu == &current->thread.fpu) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	trace_x86_fpu_dropped(fpu);

	preempt_enable();
}

/*
 * Clear FPU registers by setting them up from the init fpstate.
 * Caller must do fpregs_[un]lock() around it.
 */
static inline void restore_fpregs_from_init_fpstate(u64 features_mask)
{
	if (use_xsave())
		os_xrstor(&init_fpstate.xsave, features_mask);
	else if (use_fxsr())
		fxrstor(&init_fpstate.fxsave);
	else
		frstor(&init_fpstate.fsave);

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		copy_init_pkru_to_fpregs();
}

/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
static void fpu__clear(struct fpu *fpu, bool user_only)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	if (!static_cpu_has(X86_FEATURE_FPU)) {
		fpu__drop(fpu);
		fpu__initialize(fpu);
		return;
	}

	fpregs_lock();

	if (user_only) {
		if (!fpregs_state_valid(fpu, smp_processor_id()) &&
		    xfeatures_mask_supervisor())
			os_xrstor(&fpu->state.xsave, xfeatures_mask_supervisor());
		restore_fpregs_from_init_fpstate(xfeatures_mask_user());
	} else {
		restore_fpregs_from_init_fpstate(xfeatures_mask_all);
	}

	fpregs_mark_activate();
	fpregs_unlock();
}

void fpu__clear_user_states(struct fpu *fpu)
{
	fpu__clear(fpu, true);
}

void fpu__clear_all(struct fpu *fpu)
{
	fpu__clear(fpu, false);
}

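/*
 * Call-site sketch (illustrative): the signal code clears only user
 * states so that supervisor xfeatures survive, e.g.:
 *
 *	fpu__clear_user_states(&current->thread.fpu);
 *
 * whereas fpu__clear_all() resets supervisor state too.
 */
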
/*
 * Load FPU context before returning to userspace.
 */
void switch_fpu_return(void)
{
	if (!static_cpu_has(X86_FEATURE_FPU))
		return;

	__fpregs_load_activate();
}
EXPORT_SYMBOL_GPL(switch_fpu_return);

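/*
 * Caller sketch (illustrative): exit-to-usermode and KVM's guest-entry
 * paths call this only when a reload is actually pending, roughly:
 *
 *	if (test_thread_flag(TIF_NEED_FPU_LOAD))
 *		switch_fpu_return();
 */
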
#ifdef CONFIG_X86_DEBUG_FPU
/*
 * If current FPU state according to its tracking (loaded FPU context on this
 * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is
 * loaded on return to userland.
 */
void fpregs_assert_state_consistent(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		return;

	WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
}
EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
#endif

void fpregs_mark_activate(void)
{
	struct fpu *fpu = &current->thread.fpu;

	fpregs_activate(fpu);
	fpu->last_cpu = smp_processor_id();
	clear_thread_flag(TIF_NEED_FPU_LOAD);
}
EXPORT_SYMBOL_GPL(fpregs_mark_activate);

/*
 * x87 math exception handling:
 */

int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status. 0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		if (boot_cpu_has(X86_FEATURE_FXSR)) {
			cwd = fpu->state.fxsave.cwd;
			swd = fpu->state.fxsave.swd;
		} else {
			cwd = (unsigned short)fpu->state.fsave.cwd;
			swd = (unsigned short)fpu->state.fsave.swd;
		}

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register. Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = MXCSR_DEFAULT;

		if (boot_cpu_has(X86_FEATURE_XMM))
			mxcsr = fpu->state.fxsave.mxcsr;

		err = ~(mxcsr >> 7) & mxcsr;
	}

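	/*
	 * Worked example (illustrative): with only divide-by-zero
	 * unmasked (mask bit 0x200 clear) and its flag 0x004 set,
	 * mxcsr >> 7 lines each mask bit up with its flag bit, so
	 * ~(mxcsr >> 7) & mxcsr keeps 0x004 and the check below
	 * returns SIGFPE for it.
	 */
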
	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return SIGFPE;
	} else if (err & 0x004) { /* Divide by Zero */
		return SIGFPE;
	} else if (err & 0x008) { /* Overflow */
		return SIGFPE;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return SIGFPE;
	} else if (err & 0x020) { /* Precision */
		return SIGFPE;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}