ARM64: ipipe: share context switch code with the head domain
author     Philippe Gerum <rpm@xenomai.org>
           Fri, 29 Dec 2017 04:19:48 +0000 (20:19 -0800)
committer  Marek Szyprowski <m.szyprowski@samsung.com>
           Fri, 27 Apr 2018 09:21:34 +0000 (11:21 +0200)
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/proc-fns.h
arch/arm64/include/asm/thread_info.h
arch/arm64/kernel/entry.S
arch/arm64/mm/context.c

diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 9d155fa9a50791af293916cfdc1ede087f850c6d..8650006f72613b5767329376922ebf2c90c20110 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -27,6 +27,7 @@
 #include <linux/sched.h>
 #include <linux/sched/hotplug.h>
 #include <linux/mm_types.h>
+#include <linux/ipipe.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
@@ -206,7 +207,7 @@ static inline void __switch_mm(struct mm_struct *next)
 }
 
 static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
+do_switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk)
 {
        if (prev != next)
@@ -221,9 +222,29 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
        update_saved_ttbr0(tsk, next);
 }
 
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+         struct task_struct *tsk)
+{
+       unsigned long flags;
+
+       flags = hard_cond_local_irq_save();
+       do_switch_mm(prev, next, tsk);
+       hard_cond_local_irq_restore(flags);
+}
+
 #define deactivate_mm(tsk,mm)  do { } while (0)
 #define activate_mm(prev,next) switch_mm(prev, next, current)
 
+#ifdef CONFIG_IPIPE
+static inline void
+ipipe_switch_mm_head(struct mm_struct *prev, struct mm_struct *next,
+                          struct task_struct *tsk)
+{
+       do_switch_mm(prev, next, tsk);
+}
+#endif
+
 void verify_cpu_asid_bits(void);
 
 #endif /* !__ASSEMBLY__ */
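This first hunk splits the old switch_mm() into a shared inner helper, do_switch_mm(), plus two entry points: switch_mm() for the root (Linux) domain, which brackets the helper with hard_cond_local_irq_save/restore, and ipipe_switch_mm_head() for the co-kernel, which calls the helper directly since the head domain already runs with hard IRQs off. The sketch below is a minimal user-space model of that shape; the bool standing in for the CPU interrupt mask and the printf bodies are illustrative only, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the hardware interrupt mask (PSTATE.I on arm64). */
static bool hard_irqs_enabled = true;

/* With CONFIG_IPIPE these really mask IRQs at CPU level; without
 * the pipeline they compile away to no-ops. */
static unsigned long hard_cond_local_irq_save(void)
{
	unsigned long flags = hard_irqs_enabled;
	hard_irqs_enabled = false;
	return flags;
}

static void hard_cond_local_irq_restore(unsigned long flags)
{
	hard_irqs_enabled = flags;
}

/* The shared inner helper: must run with hard IRQs off, or the head
 * domain could preempt a half-done page table switch. */
static void do_switch_mm(const char *prev, const char *next)
{
	printf("%s -> %s (hard irqs %s)\n", prev, next,
	       hard_irqs_enabled ? "ON: bug" : "off");
}

/* Root (Linux) entry point: brackets the helper with hard masking. */
static void switch_mm(const char *prev, const char *next)
{
	unsigned long flags = hard_cond_local_irq_save();
	do_switch_mm(prev, next);
	hard_cond_local_irq_restore(flags);
}

/* Head (co-kernel) entry point: the caller already runs with hard
 * IRQs off, so the helper is called directly. */
static void ipipe_switch_mm_head(const char *prev, const char *next)
{
	do_switch_mm(prev, next);
}

int main(void)
{
	switch_mm("taskA", "taskB");		/* root domain path */
	hard_irqs_enabled = false;		/* co-kernel runs hard-masked */
	ipipe_switch_mm_head("taskB", "rt-task");
	return 0;
}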
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 14ad6e4e87d11477ca350b7dbf6ff7e6017a7cc4..0408e1bcfad6c0161caeaeb5bc425e70d3a24f04 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -37,8 +37,11 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
 #define cpu_switch_mm(pgd,mm)                          \
 do {                                                   \
+       unsigned long __flags;                          \
        BUG_ON(pgd == swapper_pg_dir);                  \
+       __flags = hard_local_irq_save();                \
        cpu_do_switch_mm(virt_to_phys(pgd),mm);         \
+       hard_local_irq_restore(__flags);                \
 } while (0)
 
 #endif /* __ASSEMBLY__ */
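cpu_switch_mm() now brackets cpu_do_switch_mm() with an unconditional hard-IRQ save/restore: the TTBR0 switch takes several steps, and under I-pipe the head domain could otherwise preempt it midway. The do { ... } while (0) wrapper keeps the multi-statement macro usable as a single statement and keeps __flags local to each expansion. A compilable model of that macro pattern, with trivial stand-ins for the kernel helpers:

#include <stdio.h>

static unsigned long hard_local_irq_save(void)
{
	puts("hard irqs off");
	return 1;
}

static void hard_local_irq_restore(unsigned long flags)
{
	(void)flags;
	puts("hard irqs restored");
}

static void cpu_do_switch_mm(void)
{
	puts("TTBR0 update");
}

/* do { ... } while (0) makes the multi-statement macro behave as a
 * single statement, and keeps __flags local to each expansion. */
#define cpu_switch_mm()					\
do {							\
	unsigned long __flags;				\
	__flags = hard_local_irq_save();		\
	cpu_do_switch_mm();				\
	hard_local_irq_restore(__flags);		\
} while (0)

int main(void)
{
	if (1)
		cpu_switch_mm();	/* expands safely inside an if body */
	else
		puts("unreachable");
	return 0;
}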
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index c65d8686c482a114af95b2942c8e3617e6e2c377..8e8e63e40ac175e75b29e335fd329894689f6013 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -97,6 +97,7 @@ void arch_setup_new_exec(void);
 #define TIF_RESTORE_SIGMASK    20
 #define TIF_SINGLESTEP         21
 #define TIF_32BIT              22      /* 32bit process */
+#define TIF_MMSWITCH_INT       25
 
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
@@ -110,6 +111,7 @@ void arch_setup_new_exec(void);
 #define _TIF_UPROBE            (1 << TIF_UPROBE)
 #define _TIF_FSCHECK           (1 << TIF_FSCHECK)
 #define _TIF_32BIT             (1 << TIF_32BIT)
+#define _TIF_MMSWITCH_INT      (1 << TIF_MMSWITCH_INT)
 
 #define _TIF_WORK_MASK         (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
                                 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
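TIF_MMSWITCH_INT follows the usual thread-flag convention: a bit index plus a derived _TIF_* mask held in thread_info->flags. Its consumer is not part of this diff, so reading the name as "an mm switch was interrupted" is an assumption here; the sketch below only illustrates the bit/mask convention itself.

#include <stdio.h>

/* Kernel convention: a bit index TIF_x plus a derived mask
 * _TIF_x = (1 << TIF_x), kept in thread_info->flags. */
#define TIF_NEED_RESCHED	1
#define TIF_MMSWITCH_INT	25

#define _TIF_NEED_RESCHED	(1UL << TIF_NEED_RESCHED)
#define _TIF_MMSWITCH_INT	(1UL << TIF_MMSWITCH_INT)

int main(void)
{
	unsigned long flags = 0;

	flags |= _TIF_MMSWITCH_INT;		/* cf. set_thread_flag() */
	if (flags & _TIF_MMSWITCH_INT)		/* cf. test_thread_flag() */
		puts("mm switch flagged");
	flags &= ~_TIF_MMSWITCH_INT;		/* cf. clear_thread_flag() */
	printf("flags now %#lx\n", flags);
	return 0;
}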
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index ff310274a71529e5bdb5e36e8d9974ff2d189b19..cda07908685e52b9b199c3c498190cd17c046d1d 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -321,6 +321,21 @@ alternative_else_nop_endif
        .macro  irq_stack_entry
        mov     x19, sp                 // preserve the original sp
 
+#ifdef CONFIG_IPIPE
+       /*
+        * When the pipeline is enabled, context switches over the IRQ
+        * stack are allowed (for the co-kernel), and further interrupts
+        * may be taken while running over sibling stack contexts. So we
+        * need an explicit way of telling whether the IRQ stack was
+        * actually exited, one which cannot depend on the current task
+        * pointer.
+        */
+       adr_this_cpu x25, irq_nesting, x26
+       ldr     w26, [x25]
+       cmp     w26, #0
+       add     w26, w26, #1
+       str     w26, [x25]
+       b.ne    9998f
+#else
        /*
         * Compare sp with the base of the task stack.
         * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
@@ -330,7 +345,7 @@ alternative_else_nop_endif
        eor     x25, x25, x19
        and     x25, x25, #~(THREAD_SIZE - 1)
        cbnz    x25, 9998f
-
+#endif
        ldr_this_cpu x25, irq_stack_ptr, x26
        mov     x26, #IRQ_STACK_SIZE
        add     x26, x25, x26
@@ -346,6 +361,12 @@ alternative_else_nop_endif
         */
        .macro  irq_stack_exit
        mov     sp, x19
+#ifdef CONFIG_IPIPE
+       adr_this_cpu x1, irq_nesting, x0
+       ldr     w0, [x1]
+       add     w0, w0, #-1
+       str     w0, [x1]
+#endif
        .endm
 
 /*
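The stock entry code decides whether it is already running on the IRQ stack by masking SP with ~(THREAD_SIZE - 1) and comparing against the task stack. Once the co-kernel may switch contexts while on the IRQ stack, that heuristic breaks, so the patch keeps an explicit per-CPU irq_nesting counter instead: irq_stack_entry increments it and only switches stacks when the previous value was zero, and irq_stack_exit decrements it. A small single-CPU C model of that bookkeeping, with plain globals in place of per-CPU variables:

#include <stdio.h>

/* Per-CPU in the kernel; one global is enough for this model. */
static unsigned int irq_nesting;
static char irq_stack[8192];

/* Mirrors irq_stack_entry: only the outermost entry switches to the
 * IRQ stack. The decision uses the counter's pre-increment value
 * rather than "does SP point into the task stack?", because with
 * I-pipe a fresh entry may already find SP on the IRQ stack. */
static char *irq_stack_enter(char *cur_sp)
{
	unsigned int was_nested = irq_nesting++;

	if (was_nested)
		return cur_sp;				/* nested: keep SP */
	return irq_stack + sizeof(irq_stack);		/* outermost: switch */
}

/* Mirrors irq_stack_exit: go back to the saved SP, drop one level. */
static char *irq_stack_exit(char *saved_sp)
{
	irq_nesting--;
	return saved_sp;
}

int main(void)
{
	char task_stack[4096];
	char *sp = task_stack + sizeof(task_stack);

	char *saved1 = sp; sp = irq_stack_enter(sp);	/* switches stacks */
	char *saved2 = sp; sp = irq_stack_enter(sp);	/* nested: no switch */
	printf("nesting=%u, nested kept SP: %s\n",
	       irq_nesting, sp == saved2 ? "yes" : "no");
	sp = irq_stack_exit(saved2);
	sp = irq_stack_exit(saved1);
	printf("nesting=%u, back on task stack: %s\n",
	       irq_nesting, sp == saved1 ? "yes" : "no");
	return 0;
}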
@@ -963,6 +984,19 @@ ENTRY(cpu_switch_to)
        ldp     x29, x9, [x8], #16
        ldr     lr, [x8]
        mov     sp, x9
+#ifdef CONFIG_IPIPE
+       /*
+        * Unlike the main kernel, the co-kernel may switch contexts
+        * over the global IRQ stack, so we can't assume that the saved
+        * SP we just reloaded into x9 lives within the incoming task's
+        * stack. Therefore we can't compute 'tsk' from that pointer,
+        * but we can peek at the incoming task struct for the stack
+        * base address, where the thread_info struct lives.
+        */
+       mov     x10, #TSK_STACK
+       add     x10, x1, x10
+       ldr     x9, [x10]
+#endif
        msr     sp_el0, x1
        ret
 ENDPROC(cpu_switch_to)
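The lines added to cpu_switch_to reload the incoming task's stack base from the task struct (at offset TSK_STACK) rather than deriving it from the just-restored SP, which may point into the shared IRQ stack after a co-kernel switch. The user-space sketch below contrasts the two lookups; the THREAD_SIZE value, the struct layout, and the placement of thread_info at the stack base follow the comment in the hunk, not any particular kernel tree.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define THREAD_SIZE 16384UL

struct thread_info { int cpu; };

struct task_struct {
	void *stack;	/* base of the task's kernel stack (TSK_STACK) */
};

/* Stock arm64 trick: mask the current SP down to the stack base,
 * where thread_info sits. Only valid if SP is inside the task stack. */
static struct thread_info *ti_from_sp(uintptr_t sp)
{
	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}

/* I-pipe variant: the restored SP may live on the shared IRQ stack,
 * so read the stack base from the task struct instead of from SP. */
static struct thread_info *ti_from_task(struct task_struct *tsk)
{
	return (struct thread_info *)tsk->stack;
}

int main(void)
{
	struct task_struct tsk;
	/* aligned_alloc gives the THREAD_SIZE alignment the mask needs */
	tsk.stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);

	uintptr_t sp_on_task_stack = (uintptr_t)tsk.stack + THREAD_SIZE - 64;
	printf("mask trick ok on task stack: %s\n",
	       ti_from_sp(sp_on_task_stack) == ti_from_task(&tsk) ? "yes" : "no");

	/* An SP parked on some other (IRQ) stack breaks the mask trick. */
	void *irq_stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
	uintptr_t sp_on_irq_stack = (uintptr_t)irq_stack + THREAD_SIZE - 64;
	printf("mask trick ok on irq stack:  %s\n",
	       ti_from_sp(sp_on_irq_stack) == ti_from_task(&tsk) ? "yes" : "no");

	free(tsk.stack);
	free(irq_stack);
	return 0;
}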
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index ab9f5f0fb2c7fc6ada0605e31d8f73b0024ae277..30e45038fd3a04bedd86f1b82415b0b4b342ce82 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -28,7 +28,7 @@
 #include <asm/tlbflush.h>
 
 static u32 asid_bits;
-static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+static IPIPE_DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 
 static atomic64_t asid_generation;
 static unsigned long *asid_map;
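Finally, cpu_asid_lock becomes an IPIPE_DEFINE_RAW_SPINLOCK: under I-pipe an ordinary raw spinlock only masks virtual (root-domain) interrupts, but the ASID allocator is now reachable from the head domain through ipipe_switch_mm_head(), so the lock must also mask interrupts at the hardware level. A rough user-space model of that difference, with a pthread mutex standing in for the SMP lock and a bool for the CPU interrupt mask:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of an I-pipe aware raw spinlock: besides excluding other
 * CPUs, taking it masks IRQs at the *hardware* level, so not even
 * the head (co-kernel) domain can preempt the holder. A stock
 * DEFINE_RAW_SPINLOCK under I-pipe only masks *virtual* IRQs, which
 * holds off root-domain handlers but not head-domain ones. */
struct ipipe_spinlock {
	pthread_mutex_t lock;	/* stands in for the SMP spinlock */
};

static bool hard_irqs_on = true;	/* stand-in for PSTATE.I */

static unsigned long ipipe_spin_lock_irqsave(struct ipipe_spinlock *l)
{
	unsigned long flags = hard_irqs_on;

	hard_irqs_on = false;	/* hard mask: head domain held off too */
	pthread_mutex_lock(&l->lock);
	return flags;
}

static void ipipe_spin_unlock_irqrestore(struct ipipe_spinlock *l,
					 unsigned long flags)
{
	pthread_mutex_unlock(&l->lock);
	hard_irqs_on = flags;
}

static struct ipipe_spinlock cpu_asid_lock = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

int main(void)
{
	unsigned long flags = ipipe_spin_lock_irqsave(&cpu_asid_lock);

	printf("allocating ASID with hard irqs %s\n",
	       hard_irqs_on ? "on (bug!)" : "off");
	ipipe_spin_unlock_irqrestore(&cpu_asid_lock, flags);
	return 0;
}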