arm64: errata: add workaround for cortex-a53 erratum #845719
Author:     Will Deacon <will.deacon@arm.com>
AuthorDate: Fri, 24 Apr 2015 17:30:51 +0000 (10:30 -0700)
Commit:     Sasha Levin <sasha.levin@oracle.com>
CommitDate: Mon, 27 Apr 2015 21:13:46 +0000 (17:13 -0400)
When running a compat (AArch32) userspace on Cortex-A53, a load at EL0
from a virtual address that matches the bottom 32 bits of the virtual
address used by a recent load at (AArch64) EL1 might return incorrect
data.

This patch works around the issue by writing to the contextidr_el1
register on the exception return path when returning to a 32-bit task.
This workaround is patched in at runtime based on the MIDR value of the
processor.
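
[Editor's note: as an aside, here is a minimal C sketch of the kind of
MIDR range check that gates this workaround at runtime. It is purely
illustrative; the macro and function names below are hypothetical, not
the kernel's, and only the architectural MIDR_EL1 field layout is assumed.]

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative MIDR range check, assuming the architectural MIDR_EL1
 * layout: Implementer[31:24], Variant[23:20], Architecture[19:16],
 * PartNum[15:4], Revision[3:0].  Names are hypothetical.
 */
#define MIDR_IMPLEMENTER(m)	(((m) >> 24) & 0xff)
#define MIDR_VARIANT(m)		(((m) >> 20) & 0xf)
#define MIDR_PARTNUM(m)		(((m) >> 4) & 0xfff)
#define MIDR_REVISION(m)	((m) & 0xf)

#define ARM_IMPLEMENTER		0x41	/* ASCII 'A' */
#define CORTEX_A53_PARTNUM	0xd03

/* Affected parts: Cortex-A53 r0p0 .. r0p4 (variant 0, revision <= 4). */
static bool midr_is_a53_up_to_r0p4(uint32_t midr)
{
	if (MIDR_IMPLEMENTER(midr) != ARM_IMPLEMENTER ||
	    MIDR_PARTNUM(midr) != CORTEX_A53_PARTNUM)
		return false;

	return MIDR_VARIANT(midr) == 0 && MIDR_REVISION(midr) <= 4;
}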

Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Cc: <stable@vger.kernel.org> # v3.18.y
(cherry picked from commit 905e8c5dcaa147163672b06fe9dcb5abaacbc711)
Signed-off-by: Kevin Hilman <khilman@linaro.org>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
arch/arm64/Kconfig
arch/arm64/include/asm/cpufeature.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/entry.S

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index db4465b..dc2d66c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -298,6 +298,27 @@ config ARM64_ERRATUM_832075
 
          If unsure, say Y.
 
+config ARM64_ERRATUM_845719
+       bool "Cortex-A53: 845719: a load might read incorrect data"
+       depends on COMPAT
+       default y
+       help
+         This option adds an alternative code sequence to work around ARM
+         erratum 845719 on Cortex-A53 parts up to r0p4.
+
+         When running a compat (AArch32) userspace on an affected Cortex-A53
+         part, a load at EL0 from a virtual address that matches the bottom 32
+         bits of the virtual address used by a recent load at (AArch64) EL1
+         might return incorrect data.
+
+         The workaround is to write the contextidr_el1 register on exception
+         return to a 32-bit task.
+         Please note that this does not necessarily enable the workaround,
+         as it depends on the alternative framework, which will only patch
+         the kernel if an affected CPU is detected.
+
+         If unsure, say Y.
+
 endmenu
 
 
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 0362f80..c008bae 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -23,8 +23,9 @@
 
 #define ARM64_WORKAROUND_CLEAN_CACHE           0
 #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE   1
+#define ARM64_WORKAROUND_845719                        2
 
-#define NCAPS                                  2
+#define NCAPS                                  3
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 5a5226f..8b32340 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -91,6 +91,14 @@ struct arm64_cpu_capabilities arm64_errata[] = {
                MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12),
        },
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_845719
+       {
+       /* Cortex-A53 r0p[01234] */
+               .desc = "ARM erratum 845719",
+               .capability = ARM64_WORKAROUND_845719,
+               MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
+       },
+#endif
        {
        }
 };
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 726b910..2b0f3d5 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
 #include <linux/init.h>
 #include <linux/linkage.h>
 
+#include <asm/alternative-asm.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
+#include <asm/cpufeature.h>
 #include <asm/errno.h>
 #include <asm/esr.h>
 #include <asm/thread_info.h>
        .if     \el == 0
        ct_user_enter
        ldr     x23, [sp, #S_SP]                // load return stack pointer
+
+#ifdef CONFIG_ARM64_ERRATUM_845719
+       alternative_insn                                                \
+       "nop",                                                          \
+       "tbz x22, #4, 1f",                                              \
+       ARM64_WORKAROUND_845719
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+       alternative_insn                                                \
+       "nop; nop",                                                     \
+       "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:",         \
+       ARM64_WORKAROUND_845719
+#else
+       alternative_insn                                                \
+       "nop",                                                          \
+       "msr contextidr_el1, xzr; 1:",                                  \
+       ARM64_WORKAROUND_845719
+#endif
+#endif
        .endif
        .if     \ret
        ldr     x1, [sp, #S_X1]                 // preserve x0 (syscall return)
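
[Editor's note: for readers unfamiliar with the alternatives framework
mentioned in the Kconfig help text above: each alternative_insn site in
entry.S assembles as NOP(s), with the replacement instructions and the
associated capability number recorded in a separate table. At boot, the
kernel patches in the replacements only if the corresponding capability
was detected (here, via the MIDR check in cpu_errata.c). Below is a
rough, hypothetical C sketch of that patching step; the struct and
function names are illustrative, not the kernel's actual API.]

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical record describing one alternative_insn site. */
struct alt_entry {
	void		*orig;		/* NOP placeholder(s) in the kernel text */
	const void	*repl;		/* replacement instructions */
	unsigned int	cpufeature;	/* e.g. ARM64_WORKAROUND_845719 */
	size_t		len;		/* length of both sequences, in bytes */
};

/*
 * Patch every recorded site whose capability was detected by the
 * boot-time MIDR check; unaffected CPUs simply keep executing the NOPs.
 * (The real kernel must also synchronise the instruction cache after
 * patching.)
 */
static void apply_alternatives_sketch(struct alt_entry *tbl, size_t nr,
				      const bool *cap_detected)
{
	for (size_t i = 0; i < nr; i++) {
		if (!cap_detected[tbl[i].cpufeature])
			continue;
		memcpy(tbl[i].orig, tbl[i].repl, tbl[i].len);
	}
}

[On an affected Cortex-A53, the patched exit path therefore tests bit 4
of x22 (which holds the saved SPSR at this point in kernel_exit; bit 4
is set when returning to AArch32), skips the workaround for 64-bit
tasks, and otherwise writes contextidr_el1 with zero, or rewrites it
with its own value when CONFIG_PID_IN_CONTEXTIDR is set, before the
exception return.]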