arm64: mte: make the per-task SCTLR_EL1 field usable elsewhere
author    Peter Collingbourne <pcc@google.com>
          Fri, 19 Mar 2021 03:10:52 +0000 (20:10 -0700)
committer Catalin Marinas <catalin.marinas@arm.com>
          Tue, 13 Apr 2021 16:31:44 +0000 (17:31 +0100)
An upcoming change will introduce per-task SCTLR_EL1 bits for PAC.
Move the existing per-task SCTLR_EL1 field out of the MTE-specific
code so that it can be shared by the PAC and MTE code paths, and so
that the task switching code becomes more efficient.

Signed-off-by: Peter Collingbourne <pcc@google.com>
Link: https://linux-review.googlesource.com/id/Ic65fac78a7926168fa68f9e8da591c9e04ff7278
Link: https://lore.kernel.org/r/13d725cb8e741950fb9d6e64b2cd9bd54ff7c3f9.1616123271.git.pcc@google.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/processor.h
arch/arm64/kernel/mte.c
arch/arm64/kernel/process.c
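
The net effect on the context-switch path, condensed from the hunks
below (simplified excerpts, not literal kernel source):

    /* Before: MTE-private field, compared in mte_thread_switch(). */
    if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
            update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);

    /* After: one shared field, compared once in __switch_to() and
     * covering every per-task SCTLR_EL1 bit in SCTLR_USER_MASK
     * (only the MTE TCF0 bits today, PAC bits in the follow-up). */
    if (prev->thread.sctlr_user != next->thread.sctlr_user)
            update_sctlr_el1(next->thread.sctlr_user);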

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index ca2cd75..80895bb 100644
@@ -151,11 +151,13 @@ struct thread_struct {
        struct ptrauth_keys_kernel      keys_kernel;
 #endif
 #ifdef CONFIG_ARM64_MTE
-       u64                     sctlr_tcf0;
        u64                     gcr_user_excl;
 #endif
+       u64                     sctlr_user;
 };
 
+#define SCTLR_USER_MASK SCTLR_EL1_TCF0_MASK
+
 static inline void arch_thread_struct_whitelist(unsigned long *offset,
                                                unsigned long *size)
 {
@@ -247,6 +249,8 @@ extern void release_thread(struct task_struct *);
 
 unsigned long get_wchan(struct task_struct *p);
 
+void set_task_sctlr_el1(u64 sctlr);
+
 /* Thread switching */
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
                                         struct task_struct *next);
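
With the field renamed to sctlr_user and masked by SCTLR_USER_MASK, a
later patch only needs to widen the mask to bring more bits under
per-task control. A minimal sketch of what the PAC follow-up might
look like, offered as an assumption about that change rather than part
of this patch (SCTLR_ELx_ENIA is the existing "enable instruction key
A" bit from asm/sysreg.h):

    /* Hypothetical follow-up: put a per-task PAC key enable bit
     * under the mask alongside the MTE tag check fault bits. */
    #define SCTLR_USER_MASK (SCTLR_EL1_TCF0_MASK | SCTLR_ELx_ENIA)
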
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 820bad9..8da597e 100644
@@ -185,26 +185,6 @@ void mte_check_tfsr_el1(void)
 }
 #endif
 
-static void update_sctlr_el1_tcf0(u64 tcf0)
-{
-       /* ISB required for the kernel uaccess routines */
-       sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
-       isb();
-}
-
-static void set_sctlr_el1_tcf0(u64 tcf0)
-{
-       /*
-        * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
-        * optimisation. Disable preemption so that it does not see
-        * the variable update before the SCTLR_EL1.TCF0 one.
-        */
-       preempt_disable();
-       current->thread.sctlr_tcf0 = tcf0;
-       update_sctlr_el1_tcf0(tcf0);
-       preempt_enable();
-}
-
 static void update_gcr_el1_excl(u64 excl)
 {
 
@@ -237,31 +217,22 @@ void flush_mte_state(void)
        write_sysreg_s(0, SYS_TFSRE0_EL1);
        clear_thread_flag(TIF_MTE_ASYNC_FAULT);
        /* disable tag checking */
-       set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
+       set_task_sctlr_el1((current->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK) |
+                          SCTLR_EL1_TCF0_NONE);
        /* reset tag generation mask */
        set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
 }
 
 void mte_thread_switch(struct task_struct *next)
 {
-       if (!system_supports_mte())
-               return;
-
-       /* avoid expensive SCTLR_EL1 accesses if no change */
-       if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
-               update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
-       else
-               isb();
-
        /*
         * Check if an async tag exception occurred at EL1.
         *
         * Note: On the context switch path we rely on the dsb() present
         * in __switch_to() to guarantee that the indirect writes to TFSR_EL1
         * are synchronized before this point.
-        * isb() above is required for the same reason.
-        *
         */
+       isb();
        mte_check_tfsr_el1();
 }
 
@@ -291,7 +262,7 @@ void mte_suspend_exit(void)
 
 long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
-       u64 tcf0;
+       u64 sctlr = task->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK;
        u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
                       SYS_GCR_EL1_EXCL_MASK;
 
@@ -300,23 +271,23 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 
        switch (arg & PR_MTE_TCF_MASK) {
        case PR_MTE_TCF_NONE:
-               tcf0 = SCTLR_EL1_TCF0_NONE;
+               sctlr |= SCTLR_EL1_TCF0_NONE;
                break;
        case PR_MTE_TCF_SYNC:
-               tcf0 = SCTLR_EL1_TCF0_SYNC;
+               sctlr |= SCTLR_EL1_TCF0_SYNC;
                break;
        case PR_MTE_TCF_ASYNC:
-               tcf0 = SCTLR_EL1_TCF0_ASYNC;
+               sctlr |= SCTLR_EL1_TCF0_ASYNC;
                break;
        default:
                return -EINVAL;
        }
 
        if (task != current) {
-               task->thread.sctlr_tcf0 = tcf0;
+               task->thread.sctlr_user = sctlr;
                task->thread.gcr_user_excl = gcr_excl;
        } else {
-               set_sctlr_el1_tcf0(tcf0);
+               set_task_sctlr_el1(sctlr);
                set_gcr_el1_excl(gcr_excl);
        }
 
@@ -333,7 +304,7 @@ long get_mte_ctrl(struct task_struct *task)
 
        ret = incl << PR_MTE_TAG_SHIFT;
 
-       switch (task->thread.sctlr_tcf0) {
+       switch (task->thread.sctlr_user & SCTLR_EL1_TCF0_MASK) {
        case SCTLR_EL1_TCF0_NONE:
                ret |= PR_MTE_TCF_NONE;
                break;
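
set_mte_ctrl() and get_mte_ctrl() sit behind the
PR_SET_TAGGED_ADDR_CTRL and PR_GET_TAGGED_ADDR_CTRL prctls. A
userspace sketch, modelled on the example in
Documentation/arm64/memory-tagging-extension.rst, that asks for
synchronous tag check faults (the fallback #defines mirror
include/uapi/linux/prctl.h for older libc headers):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_TAGGED_ADDR_CTRL
    # define PR_SET_TAGGED_ADDR_CTRL  55
    # define PR_TAGGED_ADDR_ENABLE    (1UL << 0)
    #endif
    #ifndef PR_MTE_TCF_SYNC
    # define PR_MTE_TCF_SYNC          (1UL << 1)
    # define PR_MTE_TAG_SHIFT         3
    #endif

    int main(void)
    {
            /*
             * Enable tagged addresses and synchronous tag check faults,
             * and exclude only tag 0 from random tag generation; on the
             * kernel side this request lands in set_mte_ctrl() above.
             */
            if (prctl(PR_SET_TAGGED_ADDR_CTRL,
                      PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                      (0xfffe << PR_MTE_TAG_SHIFT),
                      0, 0, 0)) {
                    perror("prctl(PR_SET_TAGGED_ADDR_CTRL)");
                    return 1;
            }
            return 0;
    }
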
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 325c83b..de0ab08 100644
@@ -529,6 +529,27 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
        write_sysreg(val, cntkctl_el1);
 }
 
+static void update_sctlr_el1(u64 sctlr)
+{
+       sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK, sctlr);
+
+       /* ISB required for the kernel uaccess routines when setting TCF0. */
+       isb();
+}
+
+void set_task_sctlr_el1(u64 sctlr)
+{
+       /*
+        * __switch_to() checks current->thread.sctlr_user as an
+        * optimisation. Disable preemption so that it does not see
+        * the variable update before the SCTLR_EL1 one.
+        */
+       preempt_disable();
+       current->thread.sctlr_user = sctlr;
+       update_sctlr_el1(sctlr);
+       preempt_enable();
+}
+
 /*
  * Thread switching.
  */
@@ -559,6 +580,9 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
         * registers.
         */
        mte_thread_switch(next);
+       /* avoid expensive SCTLR_EL1 accesses if no change */
+       if (prev->thread.sctlr_user != next->thread.sctlr_user)
+               update_sctlr_el1(next->thread.sctlr_user);
 
        /* the actual thread switch */
        last = cpu_switch_to(prev, next);
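
With the helper in process.c, an in-kernel user can flip per-task
SCTLR_EL1 bits without touching the register directly. A sketch of
the call pattern, mirroring what flush_mte_state() does above (the
function name here is invented for illustration):

    /* Hypothetical caller: switch the current task to synchronous tag
     * check faults. set_task_sctlr_el1() disables preemption so the
     * cached sctlr_user value and SCTLR_EL1 stay in step across a
     * context switch. */
    static void example_enable_sync_tcf0(void)
    {
            u64 sctlr = current->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK;

            set_task_sctlr_el1(sctlr | SCTLR_EL1_TCF0_SYNC);
    }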