In time-travel mode, cpu_relax() currently executes a real CPU
pause, but that has no effect on the simulation. Ideally, nothing
that uses it would run under simulation, but once virtio devices
are combined with the same simulation that can happen. Implement
cpu_relax() as ndelay(1) in this case, calling time_travel_ndelay(1)
directly so that erroneous use in builds that don't set
CONFIG_UML_TIME_TRAVEL_SUPPORT is caught.
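For illustration, consider a hypothetical busy-wait of the kind
cpu_relax() is meant for; wait_for_flag() below is made up for this
note and is not part of the patch. In TT_MODE_INFCPU/TT_MODE_EXTERNAL
the simulated clock only moves through explicit events, so a loop that
merely executes REP;NOP can spin forever without jiffies or the polled
condition ever changing; advancing simulated time by 1 ns per
iteration lets the rest of the simulation run.

#include <linux/compiler.h>   /* READ_ONCE */
#include <linux/jiffies.h>    /* jiffies, time_after, msecs_to_jiffies */
#include <linux/types.h>      /* bool, u32 */
#include <asm/processor.h>    /* cpu_relax */

/* Hypothetical poll helper, for illustration only. */
static bool wait_for_flag(u32 *flag)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(100);

        while (!READ_ONCE(*flag)) {
                if (time_after(jiffies, timeout))
                        return false;
                /*
                 * With this patch, in TT_MODE_INFCPU/TT_MODE_EXTERNAL
                 * each call advances simulated time by 1 ns instead of
                 * only pausing the host CPU, so jiffies (and whatever
                 * sets the flag) can make progress.
                 */
                cpu_relax();
        }
        return true;
}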
While at it, convert cpu_relax() to an __always_inline function and
do the same for rep_nop(), like the original (x86) version now does.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __UM_PROCESSOR_H
#define __UM_PROCESSOR_H
+#include <linux/time-internal.h>
/* include faultinfo structure */
#include <sysdep/faultinfo.h>
#include <asm/user.h>
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
+static __always_inline void rep_nop(void)
{
__asm__ __volatile__("rep;nop": : :"memory");
}
-#define cpu_relax() rep_nop()
+static __always_inline void cpu_relax(void)
+{
+ if (time_travel_mode == TT_MODE_INFCPU ||
+ time_travel_mode == TT_MODE_EXTERNAL)
+ time_travel_ndelay(1);
+ else
+ rep_nop();
+}
#define task_pt_regs(t) (&(t)->thread.regs)
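A note on why time_travel_ndelay(1) is called directly rather than
going through ndelay(): the intent, as stated above, is that builds
without CONFIG_UML_TIME_TRAVEL_SUPPORT catch erroneous users at build
time. The sketch below shows the kind of gating this presumably relies
on; it is schematic and assumed for illustration, not the literal
contents of linux/time-internal.h.

/*
 * Schematic only -- assumed gating pattern, not the real header text.
 * Without CONFIG_UML_TIME_TRAVEL_SUPPORT, time_travel_mode is treated
 * as a compile-time constant, so the compiler drops the time-travel
 * branch in cpu_relax(); a caller that would actually reach
 * time_travel_ndelay() breaks the build instead of silently doing
 * nothing.
 */
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
extern enum time_travel_mode time_travel_mode;
void time_travel_ndelay(unsigned long nsec);
#else
#define time_travel_mode TT_MODE_OFF
/* intentionally no definition provided here */
void time_travel_ndelay(unsigned long nsec);
#endif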