}
/*
- * Ensure current's FPSIMD/SVE storage in thread_struct is up to date
- * with respect to the CPU registers.
+ * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
+ * date with respect to the CPU registers.
*
* Softirqs (and preemption) must be disabled.
*/
-static void task_fpsimd_save(void)
+static void fpsimd_save(void)
{
+ struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st);
+ /* set by fpsimd_bind_to_cpu() */
+
WARN_ON(!in_softirq() && !irqs_disabled());
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
return;
}
- sve_save_state(sve_pffr(current),
- &current->thread.uw.fpsimd_state.fpsr);
+ sve_save_state(sve_pffr(current), &st->fpsr);
} else
- fpsimd_save_state(&current->thread.uw.fpsimd_state);
+ fpsimd_save_state(st);
}
}
if (task == current) {
local_bh_disable();
- task_fpsimd_save();
+ fpsimd_save();
set_thread_flag(TIF_FOREIGN_FPSTATE);
}
local_bh_disable();
- task_fpsimd_save();
+ fpsimd_save();
fpsimd_to_sve(current);
/* Force ret_to_user to reload the registers: */
* 'current'.
*/
if (current->mm)
- task_fpsimd_save();
+ fpsimd_save();
if (next->mm) {
/*
return;
local_bh_disable();
- task_fpsimd_save();
+ fpsimd_save();
local_bh_enable();
}
/* Save unsaved task fpsimd state, if any: */
if (current->mm)
- task_fpsimd_save();
+ fpsimd_save();
/* Invalidate any task state remaining in the fpsimd regs: */
fpsimd_flush_cpu_state();
switch (cmd) {
case CPU_PM_ENTER:
if (current->mm)
- task_fpsimd_save();
+ fpsimd_save();
fpsimd_flush_cpu_state();
break;
case CPU_PM_EXIT: