extern void fpu__clear_user_states(struct fpu *fpu);
extern bool fpu__restore_sig(void __user *buf, int ia32_frame);
-extern void restore_fpregs_from_fpstate(union fpregs_state *fpstate, u64 mask);
+extern void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask);
extern bool copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
*/
mask = xfeatures_mask_restore_user() |
xfeatures_mask_supervisor();
- restore_fpregs_from_fpstate(&fpu->state, mask);
+ restore_fpregs_from_fpstate(fpu->fpstate, mask);
fpregs_activate(fpu);
fpu->last_cpu = cpu;
frstor(&fpu->state.fsave);
}
-void restore_fpregs_from_fpstate(union fpregs_state *fpstate, u64 mask)
+void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask)
{
/*
* AMD K7/K8 and later CPUs up to Zen don't save/restore
}
if (use_xsave()) {
- os_xrstor(&fpstate->xsave, mask);
+ os_xrstor(&fpstate->regs.xsave, mask);
} else {
if (use_fxsr())
- fxrstor(&fpstate->fxsave);
+ fxrstor(&fpstate->regs.fxsave);
else
- frstor(&fpstate->fsave);
+ frstor(&fpstate->regs.fsave);
}
}
/*
 * Reset the FPU register state to the init state after an exception
 * fixup, by restoring from init_fpstate.
 *
 * Restores only the components selected by xfeatures_mask_fpstate();
 * passes the fpstate container itself (not its embedded register
 * image), matching the restore_fpregs_from_fpstate() signature that
 * takes struct fpstate *.
 *
 * NOTE(review): this block previously contained both the pre- and
 * post-conversion call lines as raw diff markers ('-'/'+'), which is
 * not valid C; resolved to the post-conversion ('+') form.
 */
void fpu_reset_from_exception_fixup(void)
{
	restore_fpregs_from_fpstate(&init_fpstate, xfeatures_mask_fpstate());
}
#if IS_ENABLED(CONFIG_KVM)
if (rstor) {
restore_mask &= xfeatures_mask_fpstate();
- restore_fpregs_from_fpstate(&rstor->state, restore_mask);
+ restore_fpregs_from_fpstate(rstor->fpstate, restore_mask);
}
fpregs_mark_activate();