{
struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+ /*
+ * Prevent migrating this task while
+ * we're fiddling with the FPU state
+ */
+ preempt_disable();
if (ia64_is_local_fpu_owner(task) && psr->mfh) {
psr->mfh = 0;
task->thread.flags |= IA64_THREAD_FPH_VALID;
ia64_save_fpu(&task->thread.fph[0]);
}
+ preempt_enable();
}
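The hunk above shows the rule this patch applies throughout: any window in which per-CPU state (here, the fph register partition and the FPU-owner kernel register) is examined or modified must be bracketed by preempt_disable()/preempt_enable(), otherwise a preemptive reschedule could migrate the task to another CPU between the ownership check and the register access. A minimal sketch of the same pattern, with invented names (my_counter, bump_local_counter), might look like this:

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(int, my_counter);	/* hypothetical per-CPU state */

static void bump_local_counter(void)
{
	int cpu;

	preempt_disable();		/* from here on we cannot migrate */
	cpu = smp_processor_id();	/* only meaningful while non-preemptible */
	per_cpu(my_counter, cpu)++;	/* touches this CPU's slot only */
	preempt_enable();		/* migration is possible again */
}

The same effect is often written with get_cpu()/put_cpu(), which fold the preempt_disable() into the smp_processor_id() lookup.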
/* first, grant user-level access to fph partition: */
psr->dfh = 0;
+
+ /*
+ * Make sure that no other task gets in on this processor
+ * while we're claiming the FPU
+ */
+ preempt_disable();
#ifndef CONFIG_SMP
{
struct task_struct *fpu_owner
= (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);
- if (ia64_is_local_fpu_owner(current))
+ if (ia64_is_local_fpu_owner(current)) {
+ preempt_enable_no_resched();
return;
+ }
if (fpu_owner)
ia64_flush_fph(fpu_owner);
psr->mfh = 1;
}
+ preempt_enable_no_resched();
}
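Note the early-return fix in this hunk: once preempt_disable() has run, every exit path out of the function must drop the preempt count again, including the if (ia64_is_local_fpu_owner(current)) shortcut. The patch uses preempt_enable_no_resched(), which decrements the count without checking for a pending reschedule, presumably acceptable here because the fault path returns through code that performs its own resched check. A hedged sketch of the rule, with invented names (claim_resource, resource_claimed):

#include <linux/preempt.h>

static int resource_claimed;	/* hypothetical CPU-local flag */

static void claim_resource(void)
{
	preempt_disable();
	if (resource_claimed) {
		/* bail out, but never return with preemption still off */
		preempt_enable_no_resched();
		return;
	}
	resource_claimed = 1;
	preempt_enable_no_resched();
}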
static inline int
* task_struct at this point.
*/
-/* Return TRUE if task T owns the fph partition of the CPU we're running on. */
+/*
+ * Return TRUE if task T owns the fph partition of the CPU we're running on.
+ * Must be called from code that has preemption disabled.
+ */
#define ia64_is_local_fpu_owner(t) \
({ \
struct task_struct *__ia64_islfo_task = (t); \
(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id() \
 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \
})
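Since ia64_is_local_fpu_owner() compares against smp_processor_id(), its result is only meaningful while the caller holds preemption off, as the new comment states. A sketch of a well-formed caller (check_fpu_ownership() is an invented wrapper, not part of the patch):

static int check_fpu_ownership(struct task_struct *task)
{
	int owns;

	preempt_disable();		/* pin to this CPU */
	owns = ia64_is_local_fpu_owner(task);
	preempt_enable();

	/*
	 * Caution: 'owns' can be stale as soon as preemption is
	 * re-enabled; real callers act on the result before calling
	 * preempt_enable().
	 */
	return owns;
}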
-/* Mark task T as owning the fph partition of the CPU we're running on. */
+/*
+ * Mark task T as owning the fph partition of the CPU we're running on.
+ * Must be called from code that has preemption disabled.
+ */
#define ia64_set_local_fpu_owner(t) do { \
struct task_struct *__ia64_slfo_task = (t); \
__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id(); \