#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>
+#include <linux/ipipe.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
}
static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
+do_switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
if (prev != next)
update_saved_ttbr0(tsk, next);
}
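+/*
+ * With the interrupt pipeline, a context switch may be preempted by
+ * the head domain, so the mm switch is wrapped in a hard IRQs-off
+ * section. hard_cond_local_irq_save() masks interrupts at the CPU
+ * level when CONFIG_IPIPE is enabled and is expected to compile away
+ * otherwise.
+ */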
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned long flags;
+
+ flags = hard_cond_local_irq_save();
+ do_switch_mm(prev, next, tsk);
+ hard_cond_local_irq_restore(flags);
+}
+
#define deactivate_mm(tsk,mm) do { } while (0)
#define activate_mm(prev,next) switch_mm(prev, next, current)
+#ifdef CONFIG_IPIPE
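+/*
+ * Head domain entry point: the co-kernel is assumed to call this with
+ * hard IRQs already disabled, so no extra masking is done here.
+ */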
+static inline void
+ipipe_switch_mm_head(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ do_switch_mm(prev, next, tsk);
+}
+#endif
+
void verify_cpu_asid_bits(void);
#endif /* !__ASSEMBLY__ */
#define cpu_switch_mm(pgd,mm) \
do { \
+ unsigned long __flags; \
BUG_ON(pgd == swapper_pg_dir); \
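+	/* hard-disable IRQs: the head domain must not preempt the TTBR0 switch */ \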
+ __flags = hard_local_irq_save(); \
cpu_do_switch_mm(virt_to_phys(pgd),mm); \
+ hard_local_irq_restore(__flags); \
} while (0)
#endif /* __ASSEMBLY__ */
#define TIF_RESTORE_SIGMASK 20
#define TIF_SINGLESTEP 21
#define TIF_32BIT 22 /* 32bit process */
+#define TIF_MMSWITCH_INT	25	/* MMU context switch interrupted (I-pipe) */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_FSCHECK (1 << TIF_FSCHECK)
#define _TIF_32BIT (1 << TIF_32BIT)
+#define _TIF_MMSWITCH_INT (1 << TIF_MMSWITCH_INT)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
.macro irq_stack_entry
mov x19, sp // preserve the original sp
+#ifdef CONFIG_IPIPE
+	/*
+	 * When the pipeline is enabled, the co-kernel may switch
+	 * contexts while running over the IRQ stack, and further
+	 * interrupts may be taken on top of those contexts. We
+	 * therefore need an explicit way of telling whether the IRQ
+	 * stack is still in use, which cannot depend on the current
+	 * task pointer: a per-CPU nesting count is kept instead.
+	 */
+	adr_this_cpu x25, irq_nesting, x26	// x25 = &this_cpu(irq_nesting)
+	ldr	w26, [x25]			// w26 = current nesting depth
+	cmp	w26, #0				// already on the IRQ stack?
+	add	w26, w26, #1			// bump the nesting count
+	str	w26, [x25]
+	b.ne	9998f				// nested entry: keep the current sp
+#else
/*
* Compare sp with the base of the task stack.
* If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
eor x25, x25, x19
and x25, x25, #~(THREAD_SIZE - 1)
cbnz x25, 9998f
-
+#endif
ldr_this_cpu x25, irq_stack_ptr, x26
mov x26, #IRQ_STACK_SIZE
add x26, x25, x26
*/
.macro irq_stack_exit
mov sp, x19
+#ifdef CONFIG_IPIPE
+	adr_this_cpu x1, irq_nesting, x0	// x1 = &this_cpu(irq_nesting)
+	ldr	w0, [x1]
+	sub	w0, w0, #1			// drop one nesting level
+	str	w0, [x1]
+#endif
.endm
/*
ldp x29, x9, [x8], #16
ldr lr, [x8]
mov sp, x9
+#ifdef CONFIG_IPIPE
+ /*
+ * Unlike the main kernel, the co-kernel may switch contexts
+ * over the global IRQ stack, so we can't assume that the saved
+ * %sp we just reloaded in x9 lives within the incoming task's
+ * stack. Therefore we can't compute 'tsk' from this pointer,
+ * but we can peek at the incoming task struct for the stack
+ * base address, where the thread_info struct lives.
+ */
+	mov	x10, #TSK_STACK			// offsetof(struct task_struct, stack)
+	add	x10, x1, x10			// x10 = &next->stack
+	ldr	x9, [x10]			// x9 = next's stack base, where thread_info lives
+#endif
msr sp_el0, x1
ret
ENDPROC(cpu_switch_to)
#include <asm/tlbflush.h>
static u32 asid_bits;
-static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
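+/*
+ * Pipeline-aware lock: IPIPE_DEFINE_RAW_SPINLOCK hard-disables IRQs
+ * while the lock is held, so the ASID allocation path cannot be
+ * preempted by the head domain.
+ */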
+static IPIPE_DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation;
static unsigned long *asid_map;