return err;
}
-extern int copy_fpregs_to_fpstate(struct fpu *fpu);
+extern int save_fpregs_to_fpstate(struct fpu *fpu);
static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
{
static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
- if (!copy_fpregs_to_fpstate(old_fpu))
+ if (!save_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
else
old_fpu->last_cpu = cpu;
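A minimal sketch of what the last_cpu bookkeeping above buys (illustrative C, not part of the patch; the toy_ names are invented): the keep-state-in-registers optimization only applies when the task resumes on the CPU that still holds its registers.

	/*
	 * Toy model of the validity check implied by last_cpu, assuming the
	 * convention shown above: -1 means "registers were clobbered".
	 * The real kernel additionally compares a per-CPU owner pointer.
	 */
	static int toy_fpregs_state_valid(int last_cpu, int this_cpu)
	{
		return last_cpu != -1 && last_cpu == this_cpu;
	}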
* Modern FPU state can be kept in registers, if there are
* no pending FP exceptions.
*/
-int copy_fpregs_to_fpstate(struct fpu *fpu)
+int save_fpregs_to_fpstate(struct fpu *fpu)
{
if (likely(use_xsave())) {
os_xsave(&fpu->state.xsave);
		return 1;
}
-EXPORT_SYMBOL(copy_fpregs_to_fpstate);
+EXPORT_SYMBOL(save_fpregs_to_fpstate);
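The callers above rely on the return convention that a nonzero value means the live registers survived the save, while zero means they were clobbered, as the legacy FNSAVE fallback does. A minimal standalone sketch of that contract (toy_ names are invented; the stubs stand in for the real save instructions):

	#include <stdbool.h>

	struct toy_fpstate { unsigned char image[512]; };

	/* XSAVE/FXSAVE leave the registers intact; FNSAVE clears them. */
	static void toy_xsave(struct toy_fpstate *s)  { (void)s; }
	static void toy_fnsave(struct toy_fpstate *s) { (void)s; }

	/* Returns 1 if the registers are still valid after saving, 0 if not. */
	static int toy_save_fpregs_to_fpstate(struct toy_fpstate *s, bool have_xsave)
	{
		if (have_xsave) {
			toy_xsave(s);
			return 1;
		}
		toy_fnsave(s);	/* destructive save: registers no longer valid */
		return 0;
	}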
void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
* Ignore return value -- we don't care if reg state
* is clobbered.
*/
- copy_fpregs_to_fpstate(&current->thread.fpu);
+ save_fpregs_to_fpstate(&current->thread.fpu);
}
__cpu_invalidate_fpregs_state();
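The return value can be ignored here because kernel_fpu_begin_mask() is entered precisely so the kernel can clobber the FPU registers itself; whether the save left them valid is irrelevant. An illustrative in-kernel user of the kernel_fpu_begin()/kernel_fpu_end() pair (the function name is invented):

	#include <asm/fpu/api.h>

	static void example_simd_user(void)
	{
		kernel_fpu_begin();	/* current task's FPU state saved as above */
		/* ... kernel code may now use SSE/AVX registers freely ... */
		kernel_fpu_end();	/* the task's state is restored lazily later */
	}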
trace_x86_fpu_before_save(fpu);
if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
- if (!copy_fpregs_to_fpstate(fpu)) {
+ if (!save_fpregs_to_fpstate(fpu)) {
copy_kernel_to_fpregs(&fpu->state);
}
}
if (test_thread_flag(TIF_NEED_FPU_LOAD))
memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);
- else if (!copy_fpregs_to_fpstate(dst_fpu))
+ else if (!save_fpregs_to_fpstate(dst_fpu))
copy_kernel_to_fpregs(&dst_fpu->state);
fpregs_unlock();
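The same "which copy is newest?" decision drives this clone path and the KVM path below: with TIF_NEED_FPU_LOAD set, memory already holds the newest state, otherwise the live registers do. A minimal sketch of the pattern (toy_ names are invented):

	#include <string.h>
	#include <stdbool.h>

	struct toy_fpstate { unsigned char image[512]; };

	static void toy_copy_newest_state(struct toy_fpstate *dst,
					  const struct toy_fpstate *src_mem,
					  bool need_fpu_load,
					  void (*save_live_regs)(struct toy_fpstate *))
	{
		if (need_fpu_load)
			memcpy(dst, src_mem, sizeof(*dst));	/* memory is newest */
		else
			save_live_regs(dst);			/* registers are newest */
	}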
memcpy(&fpu->state, &current->thread.fpu.state,
fpu_kernel_xstate_size);
else
- copy_fpregs_to_fpstate(fpu);
+ save_fpregs_to_fpstate(fpu);
}
/* Swap (qemu) user FPU context for the guest FPU context. */