The udelay() inline for ia64 uses the ITC. If CONFIG_PREEMPT is enabled,
the platform has unsynchronized ITCs, and the calling task migrates to
another CPU while in the udelay loop, then the effective delay may be
too short or very, very long.
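
To make the failure mode concrete, here is a small userspace sketch of the
unsigned ITC arithmetic across a migration. The two-CPU scenario and all
numeric values are illustrative assumptions, not taken from the patch:

    #include <stdio.h>

    int main(void)
    {
        /* ITC snapshot taken on CPU A just before the delay loop. */
        unsigned long start  = 1000000UL;
        unsigned long cycles = 100000UL;  /* requested delay, in cycles */

        /* Case 1: the task migrates to CPU B, whose ITC is behind
         * `start`. The unsigned delta wraps to a huge value, the loop
         * condition is immediately false, and the delay is far too
         * short. */
        unsigned long itc_b = 400000UL;
        printf("wrapped delta: %lu (>= cycles, loop exits at once)\n",
               itc_b - start);

        /* Case 2: CPU B's ITC is above `start` but lags CPU A by some
         * offset. The delta then under-counts elapsed time by that
         * offset, so the loop spins for the offset in addition to the
         * requested delay: potentially very, very long. */
        return 0;
    }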
This patch disables preemption around 100 usec chunks of the overall
desired udelay time, re-enabling it between chunks. This bounds each
preemption holdoff to roughly 100 usec instead of the full requested delay.
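For example, udelay(250) now spins in three non-preemptible chunks of 100,
100, and 50 usec, with a preemption point after each chunk, so the
worst-case holdoff drops from the full 250 usec to about 100 usec.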
udelay() is now too big to inline, so move it out of line and export it.
Signed-off-by: John Hawkes <hawkes@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
	 */
	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
}
+
+#define SMALLUSECS 100
+
+void
+udelay (unsigned long usecs)
+{
+	unsigned long start;
+	unsigned long cycles;
+	unsigned long smallusecs;
+
+	/*
+	 * Execute the non-preemptible delay loop (because the ITC might
+	 * not be synchronized between CPUs) in relatively short time
+	 * chunks, allowing preemption between the chunks.
+	 */
+	while (usecs > 0) {
+		smallusecs = (usecs > SMALLUSECS) ? SMALLUSECS : usecs;
+		preempt_disable();
+		cycles = smallusecs*local_cpu_data->cyc_per_usec;
+		start = ia64_get_itc();
+
+		while (ia64_get_itc() - start < cycles)
+			cpu_relax();
+
+		preempt_enable();
+		usecs -= smallusecs;
+	}
+}
+EXPORT_SYMBOL(udelay);
--- a/include/asm-ia64/delay.h
+++ b/include/asm-ia64/delay.h
	ia64_delay_loop (loops - 1);
}
-static __inline__ void
-udelay (unsigned long usecs)
-{
-	unsigned long start = ia64_get_itc();
-	unsigned long cycles = usecs*local_cpu_data->cyc_per_usec;
-
-	while (ia64_get_itc() - start < cycles)
-		cpu_relax();
-}
+extern void udelay (unsigned long usecs);
#endif /* _ASM_IA64_DELAY_H */
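
For context, a sketch of a hypothetical caller follows: a driver that may be
built as a module, which is why udelay() must now be exported with
EXPORT_SYMBOL. The helper name and delay value are made up for illustration:

    #include <linux/delay.h>  /* declares udelay() */

    /* Hypothetical driver helper waiting for hardware to settle. */
    static void example_hw_settle(void)
    {
        /*
         * ~250 usec settle time. On ia64 with this patch, the wait runs
         * as non-preemptible chunks of 100, 100, and 50 usec, with
         * preemption allowed between the chunks.
         */
        udelay(250);
    }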