Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64...
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 28 Jun 2021 21:04:24 +0000 (14:04 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 28 Jun 2021 21:04:24 +0000 (14:04 -0700)
Pull arm64 updates from Will Deacon:
 "There's a reasonable amount here and the juicy details are all below.

  It's worth noting that the MTE/KASAN changes strayed outside of our
  usual directories due to core mm changes and some associated changes
  to some other architectures; Andrew asked for us to carry these [1]
  rather than take them via the -mm tree.

  Summary:

   - Optimise SVE switching for CPUs with 128-bit implementations.

   - Fix output format from SVE selftest.

   - Add support for versions 1.2 and 1.3 of the SMC calling
     convention.

   - Allow Pointer Authentication to be configured independently for
     kernel and userspace.

   - PMU driver cleanups for managing IRQ affinity and exposing event
     attributes via sysfs.

   - KASAN optimisations for both hardware tagging (MTE) and out-of-line
     software tagging implementations.

   - Relax frame record alignment requirements to facilitate 8-byte
     alignment with KASAN and Clang.

   - Cleanup of page-table definitions and removal of unused memory
     types.

   - Reduction of ARCH_DMA_MINALIGN back to 64 bytes.

   - Refactoring of our instruction decoding routines and addition of
     some missing encodings.

   - Entry code moved into C and hardened against harmful compiler
     instrumentation.

   - Update booting requirements for the FEAT_HCX feature, added to v8.7
     of the architecture.

   - Fix resume from idle when pNMI is being used.

   - Additional CPU sanity checks for MTE and preparatory changes for
     systems where not all of the CPUs support 32-bit EL0.

   - Update our kernel string routines to the latest Cortex Strings
     implementation.

   - Big cleanup of our cache maintenance routines, which were
     confusingly named and inconsistent in their implementations.

   - Tweak linker flags so that GDB can understand vmlinux when using
     RELR relocations.

   - Boot path cleanups to enable early initialisation of per-cpu
     operations needed by KCSAN.

   - Non-critical fixes and miscellaneous cleanup"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (150 commits)
  arm64: tlb: fix the TTL value of tlb_get_level
  arm64: Restrict undef hook for cpufeature registers
  arm64/mm: Rename ARM64_SWAPPER_USES_SECTION_MAPS
  arm64: insn: avoid circular include dependency
  arm64: smp: Bump debugging information print down to KERN_DEBUG
  drivers/perf: fix the missed ida_simple_remove() in ddr_perf_probe()
  perf/arm-cmn: Fix invalid pointer when access dtc object sharing the same IRQ number
  arm64: suspend: Use cpuidle context helpers in cpu_suspend()
  PSCI: Use cpuidle context helpers in psci_cpu_suspend_enter()
  arm64: Convert cpu_do_idle() to using cpuidle context helpers
  arm64: Add cpuidle context save/restore helpers
  arm64: head: fix code comments in set_cpu_boot_mode_flag
  arm64: mm: drop unused __pa(__idmap_text_start)
  arm64: mm: fix the count comments in compute_indices
  arm64/mm: Fix ttbr0 values stored in struct thread_info for software-pan
  arm64: mm: Pass original fault address to handle_mm_fault()
  arm64/mm: Drop SECTION_[SHIFT|SIZE|MASK]
  arm64/mm: Use CONT_PMD_SHIFT for ARM64_MEMSTART_SHIFT
  arm64/mm: Drop SWAPPER_INIT_MAP_SIZE
  arm64: Conditionally configure PTR_AUTH key of the kernel.
  ...

158 files changed:
Documentation/arm64/booting.rst
Makefile
arch/alpha/include/asm/page.h
arch/arm/include/asm/cpuidle.h
arch/arm/kernel/perf_event_v7.c
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/include/asm/alternative-macros.h
arch/arm64/include/asm/arch_gicv3.h
arch/arm64/include/asm/asm-prototypes.h
arch/arm64/include/asm/asm_pointer_auth.h
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cache.h
arch/arm64/include/asm/cacheflush.h
arch/arm64/include/asm/cpu.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cpuidle.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/exception.h
arch/arm64/include/asm/fpsimd.h
arch/arm64/include/asm/fpsimdmacros.h
arch/arm64/include/asm/insn-def.h [new file with mode: 0644]
arch/arm64/include/asm/insn.h
arch/arm64/include/asm/kernel-pgtable.h
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/linkage.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/module.lds.h
arch/arm64/include/asm/mte-kasan.h
arch/arm64/include/asm/mte.h
arch/arm64/include/asm/page.h
arch/arm64/include/asm/patching.h [new file with mode: 0644]
arch/arm64/include/asm/perf_event.h
arch/arm64/include/asm/pgtable-hwdef.h
arch/arm64/include/asm/pgtable-prot.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/pointer_auth.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/scs.h
arch/arm64/include/asm/sdei.h
arch/arm64/include/asm/smp.h
arch/arm64/include/asm/stacktrace.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/tlb.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/acpi.c
arch/arm64/kernel/alternative.c
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/efi-entry.S
arch/arm64/kernel/entry-common.c
arch/arm64/kernel/entry-fpsimd.S
arch/arm64/kernel/entry.S
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/ftrace.c
arch/arm64/kernel/head.S
arch/arm64/kernel/hibernate-asm.S
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/idle.c [new file with mode: 0644]
arch/arm64/kernel/idreg-override.c
arch/arm64/kernel/image-vars.h
arch/arm64/kernel/jump_label.c
arch/arm64/kernel/kaslr.c
arch/arm64/kernel/kgdb.c
arch/arm64/kernel/machine_kexec.c
arch/arm64/kernel/patching.c [new file with mode: 0644]
arch/arm64/kernel/perf_callchain.c
arch/arm64/kernel/perf_event.c
arch/arm64/kernel/probes/kprobes.c
arch/arm64/kernel/probes/simulate-insn.c
arch/arm64/kernel/probes/uprobes.c
arch/arm64/kernel/process.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/sdei.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/signal.c
arch/arm64/kernel/smccc-call.S
arch/arm64/kernel/smp.c
arch/arm64/kernel/smp_spin_table.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kernel/suspend.c
arch/arm64/kernel/sys_compat.c
arch/arm64/kernel/traps.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/cache.S
arch/arm64/kvm/hyp/nvhe/setup.c
arch/arm64/kvm/hyp/nvhe/tlb.c
arch/arm64/kvm/hyp/pgtable.c
arch/arm64/lib/Makefile
arch/arm64/lib/clear_user.S
arch/arm64/lib/insn.c [moved from arch/arm64/kernel/insn.c with 86% similarity]
arch/arm64/lib/kasan_sw_tags.S [new file with mode: 0644]
arch/arm64/lib/memchr.S
arch/arm64/lib/memcmp.S
arch/arm64/lib/memcpy.S
arch/arm64/lib/memmove.S [deleted file]
arch/arm64/lib/mte.S
arch/arm64/lib/strcmp.S
arch/arm64/lib/strlen.S
arch/arm64/lib/strncmp.S
arch/arm64/lib/uaccess_flushcache.c
arch/arm64/mm/cache.S
arch/arm64/mm/context.c
arch/arm64/mm/fault.c
arch/arm64/mm/flush.c
arch/arm64/mm/init.c
arch/arm64/mm/mmu.c
arch/arm64/mm/proc.S
arch/arm64/mm/ptdump.c
arch/arm64/net/bpf_jit_comp.c
arch/arm64/tools/cpucaps
arch/ia64/include/asm/page.h
arch/m68k/include/asm/page_no.h
arch/s390/include/asm/page.h
arch/x86/include/asm/page.h
drivers/firmware/psci/psci.c
drivers/firmware/smccc/smccc.c
drivers/misc/lkdtm/bugs.c
drivers/perf/arm-cci.c
drivers/perf/arm-ccn.c
drivers/perf/arm-cmn.c
drivers/perf/arm_dmc620_pmu.c
drivers/perf/arm_dsu_pmu.c
drivers/perf/arm_pmu.c
drivers/perf/arm_smmuv3_pmu.c
drivers/perf/arm_spe_pmu.c
drivers/perf/fsl_imx8_ddr_perf.c
drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
drivers/perf/hisilicon/hisi_uncore_pmu.c
drivers/perf/hisilicon/hisi_uncore_pmu.h
drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
drivers/perf/qcom_l2_pmu.c
drivers/perf/qcom_l3_pmu.c
drivers/perf/thunderx2_pmu.c
drivers/perf/xgene_pmu.c
include/linux/arm-smccc.h
include/linux/gfp.h
include/linux/highmem.h
include/linux/interrupt.h
include/linux/kasan.h
include/linux/page-flags.h
include/linux/perf_event.h
include/trace/events/mmflags.h
kernel/irq/manage.c
mm/kasan/common.c
mm/kasan/hw_tags.c
mm/kasan/sw_tags.c
mm/mempool.c
mm/page_alloc.c
scripts/Makefile.kasan
scripts/tools-support-relr.sh
tools/testing/selftests/arm64/fp/sve-probe-vls.c

index 18b8cc1..a9192e7 100644 (file)
@@ -277,6 +277,12 @@ Before jumping into the kernel, the following conditions must be met:
 
     - SCR_EL3.FGTEn (bit 27) must be initialised to 0b1.
 
+  For CPUs with support for HCRX_EL2 (FEAT_HCX) present:
+
+  - If EL3 is present and the kernel is entered at EL2:
+
+    - SCR_EL3.HXEn (bit 38) must be initialised to 0b1.
+
   For CPUs with Advanced SIMD and floating point support:
 
   - If EL3 is present:
index 0565cae..88888ff 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1039,7 +1039,7 @@ LDFLAGS_vmlinux   += $(call ld-option, -X,)
 endif
 
 ifeq ($(CONFIG_RELR),y)
-LDFLAGS_vmlinux        += --pack-dyn-relocs=relr
+LDFLAGS_vmlinux        += --pack-dyn-relocs=relr --use-android-relr-tags
 endif
 
 # We never want expected sections to be placed heuristically by the
index 268f99b..18f48a6 100644 (file)
@@ -17,9 +17,9 @@
 extern void clear_page(void *page);
 #define clear_user_page(page, vaddr, pg)       clear_page(page)
 
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
-       alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vmaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+       alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vmaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
 
 extern void copy_page(void * _to, void * _from);
 #define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
index bc4ffa7..397be5e 100644 (file)
@@ -50,4 +50,9 @@ extern int arm_cpuidle_suspend(int index);
 
 extern int arm_cpuidle_init(int cpu);
 
+struct arm_cpuidle_irq_context { };
+
+#define arm_cpuidle_save_irq_context(c)                (void)c
+#define arm_cpuidle_restore_irq_context(c)     (void)c
+
 #endif
index 2924d79..eb21904 100644 (file)
@@ -773,10 +773,10 @@ static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
                pr_err("CPU%u writing wrong counter %d\n",
                        smp_processor_id(), idx);
        } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
-               asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
+               asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value));
        } else {
                armv7_pmnc_select_counter(idx);
-               asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
+               asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value));
        }
 }
 
index 9f1d856..dabe9b8 100644 (file)
@@ -1481,12 +1481,6 @@ menu "ARMv8.3 architectural features"
 config ARM64_PTR_AUTH
        bool "Enable support for pointer authentication"
        default y
-       depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
-       # Modern compilers insert a .note.gnu.property section note for PAC
-       # which is only understood by binutils starting with version 2.33.1.
-       depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
-       depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
-       depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
        help
          Pointer authentication (part of the ARMv8.3 Extensions) provides
          instructions for signing and authenticating pointers against secret
@@ -1498,13 +1492,6 @@ config ARM64_PTR_AUTH
          for each process at exec() time, with these keys being
          context-switched along with the process.
 
-         If the compiler supports the -mbranch-protection or
-         -msign-return-address flag (e.g. GCC 7 or later), then this option
-         will also cause the kernel itself to be compiled with return address
-         protection. In this case, and if the target hardware is known to
-         support pointer authentication, then CONFIG_STACKPROTECTOR can be
-         disabled with minimal loss of protection.
-
          The feature is detected at runtime. If the feature is not present in
          hardware it will not be advertised to userspace/KVM guest nor will it
          be enabled.
@@ -1515,6 +1502,24 @@ config ARM64_PTR_AUTH
          but with the feature disabled. On such a system, this option should
          not be selected.
 
+config ARM64_PTR_AUTH_KERNEL
+       bool "Use pointer authentication for kernel"
+       default y
+       depends on ARM64_PTR_AUTH
+       depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
+       # Modern compilers insert a .note.gnu.property section note for PAC
+       # which is only understood by binutils starting with version 2.33.1.
+       depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
+       depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
+       depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
+       help
+         If the compiler supports the -mbranch-protection or
+         -msign-return-address flag (e.g. GCC 7 or later), then this option
+         will cause the kernel itself to be compiled with return address
+         protection. In this case, and if the target hardware is known to
+         support pointer authentication, then CONFIG_STACKPROTECTOR can be
+         disabled with minimal loss of protection.
+
          This feature works with FUNCTION_GRAPH_TRACER option only if
          DYNAMIC_FTRACE_WITH_REGS is enabled.
 
@@ -1606,7 +1611,7 @@ config ARM64_BTI_KERNEL
        bool "Use Branch Target Identification for kernel"
        default y
        depends on ARM64_BTI
-       depends on ARM64_PTR_AUTH
+       depends on ARM64_PTR_AUTH_KERNEL
        depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
        # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
        depends on !CC_IS_GCC || GCC_VERSION >= 100100
index b52481f..3b5b1c4 100644 (file)
@@ -70,7 +70,7 @@ endif
 # off, this will be overridden if we are using branch protection.
 branch-prot-flags-y += $(call cc-option,-mbranch-protection=none)
 
-ifeq ($(CONFIG_ARM64_PTR_AUTH),y)
+ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y)
 branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
 # We enable additional protection for leaf functions as there is some
 # narrow potential for ROP protection benefits and no substantial
index 8a078fc..7e157ab 100644 (file)
@@ -3,12 +3,10 @@
 #define __ASM_ALTERNATIVE_MACROS_H
 
 #include <asm/cpucaps.h>
+#include <asm/insn-def.h>
 
 #define ARM64_CB_PATCH ARM64_NCAPS
 
-/* A64 instructions are always 32 bits. */
-#define        AARCH64_INSN_SIZE               4
-
 #ifndef __ASSEMBLY__
 
 #include <linux/stringify.h>
@@ -197,11 +195,6 @@ alternative_endif
 #define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...)  \
        alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
 
-.macro user_alt, label, oldinstr, newinstr, cond
-9999:  alternative_insn "\oldinstr", "\newinstr", \cond
-       _asm_extable 9999b, \label
-.endm
-
 #endif  /*  __ASSEMBLY__  */
 
 /*
index 934b9be..4ad22c3 100644 (file)
@@ -124,7 +124,8 @@ static inline u32 gic_read_rpr(void)
 #define gic_read_lpir(c)               readq_relaxed(c)
 #define gic_write_lpir(v, c)           writeq_relaxed(v, c)
 
-#define gic_flush_dcache_to_poc(a,l)   __flush_dcache_area((a), (l))
+#define gic_flush_dcache_to_poc(a,l)   \
+       dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
 
 #define gits_read_baser(c)             readq_relaxed(c)
 #define gits_write_baser(v, c)         writeq_relaxed(v, c)
index 1c9a3a0..ec1d965 100644 (file)
@@ -23,4 +23,10 @@ long long __ashlti3(long long a, int b);
 long long __ashrti3(long long a, int b);
 long long __lshrti3(long long a, int b);
 
+/*
+ * This function uses a custom calling convention and cannot be called from C so
+ * this prototype is not entirely accurate.
+ */
+void __hwasan_tag_mismatch(unsigned long addr, unsigned long access_info);
+
 #endif /* __ASM_PROTOTYPES_H */
index 8ca2dc0..f1bba5f 100644 (file)
@@ -7,19 +7,7 @@
 #include <asm/cpufeature.h>
 #include <asm/sysreg.h>
 
-#ifdef CONFIG_ARM64_PTR_AUTH
-/*
- * thread.keys_user.ap* as offset exceeds the #imm offset range
- * so use the base value of ldp as thread.keys_user and offset as
- * thread.keys_user.ap*.
- */
-       .macro __ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
-       mov     \tmp1, #THREAD_KEYS_USER
-       add     \tmp1, \tsk, \tmp1
-       ldp     \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
-       msr_s   SYS_APIAKEYLO_EL1, \tmp2
-       msr_s   SYS_APIAKEYHI_EL1, \tmp3
-       .endm
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
 
        .macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
        mov     \tmp1, #THREAD_KEYS_KERNEL
@@ -42,6 +30,33 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
 alternative_else_nop_endif
        .endm
 
+#else /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
+       .macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
+       .endm
+
+       .macro ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
+       .endm
+
+       .macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
+       .endm
+
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+/*
+ * thread.keys_user.ap* as offset exceeds the #imm offset range
+ * so use the base value of ldp as thread.keys_user and offset as
+ * thread.keys_user.ap*.
+ */
+       .macro __ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
+       mov     \tmp1, #THREAD_KEYS_USER
+       add     \tmp1, \tsk, \tmp1
+       ldp     \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
+       msr_s   SYS_APIAKEYLO_EL1, \tmp2
+       msr_s   SYS_APIAKEYHI_EL1, \tmp3
+       .endm
+
        .macro __ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
        mrs     \tmp1, id_aa64isar1_el1
        ubfx    \tmp1, \tmp1, #ID_AA64ISAR1_APA_SHIFT, #8
@@ -64,17 +79,11 @@ alternative_else_nop_endif
 .Lno_addr_auth\@:
        .endm
 
-#else /* CONFIG_ARM64_PTR_AUTH */
+#else /* !CONFIG_ARM64_PTR_AUTH */
 
        .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
        .endm
 
-       .macro ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
-       .endm
-
-       .macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
-       .endm
-
 #endif /* CONFIG_ARM64_PTR_AUTH */
 
 #endif /* __ASM_ASM_POINTER_AUTH_H */
index 8418c1b..89faca0 100644 (file)
@@ -130,15 +130,27 @@ alternative_endif
        .endm
 
 /*
- * Emit an entry into the exception table
+ * Create an exception table entry for `insn`, which will branch to `fixup`
+ * when an unhandled fault is taken.
  */
-       .macro          _asm_extable, from, to
+       .macro          _asm_extable, insn, fixup
        .pushsection    __ex_table, "a"
        .align          3
-       .long           (\from - .), (\to - .)
+       .long           (\insn - .), (\fixup - .)
        .popsection
        .endm
 
+/*
+ * Create an exception table entry for `insn` if `fixup` is provided. Otherwise
+ * do nothing.
+ */
+       .macro          _cond_extable, insn, fixup
+       .ifnc           \fixup,
+       _asm_extable    \insn, \fixup
+       .endif
+       .endm
+
+
 #define USER(l, x...)                          \
 9999:  x;                                      \
        _asm_extable    9999b, l
@@ -232,17 +244,25 @@ lr        .req    x30             // link register
         * @dst: destination register
         */
 #if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
-       .macro  this_cpu_offset, dst
+       .macro  get_this_cpu_offset, dst
        mrs     \dst, tpidr_el2
        .endm
 #else
-       .macro  this_cpu_offset, dst
+       .macro  get_this_cpu_offset, dst
 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
        mrs     \dst, tpidr_el1
 alternative_else
        mrs     \dst, tpidr_el2
 alternative_endif
        .endm
+
+       .macro  set_this_cpu_offset, src
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+       msr     tpidr_el1, \src
+alternative_else
+       msr     tpidr_el2, \src
+alternative_endif
+       .endm
 #endif
 
        /*
@@ -253,7 +273,7 @@ alternative_endif
        .macro adr_this_cpu, dst, sym, tmp
        adrp    \tmp, \sym
        add     \dst, \tmp, #:lo12:\sym
-       this_cpu_offset \tmp
+       get_this_cpu_offset \tmp
        add     \dst, \dst, \tmp
        .endm
 
@@ -264,7 +284,7 @@ alternative_endif
         */
        .macro ldr_this_cpu dst, sym, tmp
        adr_l   \dst, \sym
-       this_cpu_offset \tmp
+       get_this_cpu_offset \tmp
        ldr     \dst, [\dst, \tmp]
        .endm
 
@@ -375,51 +395,53 @@ alternative_cb_end
        bfi     \tcr, \tmp0, \pos, #3
        .endm
 
+       .macro __dcache_op_workaround_clean_cache, op, addr
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+       dc      \op, \addr
+alternative_else
+       dc      civac, \addr
+alternative_endif
+       .endm
+
 /*
  * Macro to perform a data cache maintenance for the interval
- * [kaddr, kaddr + size)
+ * [start, end)
  *
  *     op:             operation passed to dc instruction
  *     domain:         domain used in dsb instruciton
- *     kaddr:          starting virtual address of the region
- *     size:           size of the region
- *     Corrupts:       kaddr, size, tmp1, tmp2
+ *     start:          starting virtual address of the region
+ *     end:            end virtual address of the region
+ *     fixup:          optional label to branch to on user fault
+ *     Corrupts:       start, end, tmp1, tmp2
  */
-       .macro __dcache_op_workaround_clean_cache, op, kaddr
-alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
-       dc      \op, \kaddr
-alternative_else
-       dc      civac, \kaddr
-alternative_endif
-       .endm
-
-       .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+       .macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
        dcache_line_size \tmp1, \tmp2
-       add     \size, \kaddr, \size
        sub     \tmp2, \tmp1, #1
-       bic     \kaddr, \kaddr, \tmp2
-9998:
+       bic     \start, \start, \tmp2
+.Ldcache_op\@:
        .ifc    \op, cvau
-       __dcache_op_workaround_clean_cache \op, \kaddr
+       __dcache_op_workaround_clean_cache \op, \start
        .else
        .ifc    \op, cvac
-       __dcache_op_workaround_clean_cache \op, \kaddr
+       __dcache_op_workaround_clean_cache \op, \start
        .else
        .ifc    \op, cvap
-       sys     3, c7, c12, 1, \kaddr   // dc cvap
+       sys     3, c7, c12, 1, \start   // dc cvap
        .else
        .ifc    \op, cvadp
-       sys     3, c7, c13, 1, \kaddr   // dc cvadp
+       sys     3, c7, c13, 1, \start   // dc cvadp
        .else
-       dc      \op, \kaddr
+       dc      \op, \start
        .endif
        .endif
        .endif
        .endif
-       add     \kaddr, \kaddr, \tmp1
-       cmp     \kaddr, \size
-       b.lo    9998b
+       add     \start, \start, \tmp1
+       cmp     \start, \end
+       b.lo    .Ldcache_op\@
        dsb     \domain
+
+       _cond_extable .Ldcache_op\@, \fixup
        .endm
 
 /*
@@ -427,20 +449,22 @@ alternative_endif
  * [start, end)
  *
  *     start, end:     virtual addresses describing the region
- *     label:          A label to branch to on user fault.
+ *     fixup:          optional label to branch to on user fault
  *     Corrupts:       tmp1, tmp2
  */
-       .macro invalidate_icache_by_line start, end, tmp1, tmp2, label
+       .macro invalidate_icache_by_line start, end, tmp1, tmp2, fixup
        icache_line_size \tmp1, \tmp2
        sub     \tmp2, \tmp1, #1
        bic     \tmp2, \start, \tmp2
-9997:
-USER(\label, ic        ivau, \tmp2)                    // invalidate I line PoU
+.Licache_op\@:
+       ic      ivau, \tmp2                     // invalidate I line PoU
        add     \tmp2, \tmp2, \tmp1
        cmp     \tmp2, \end
-       b.lo    9997b
+       b.lo    .Licache_op\@
        dsb     ish
        isb
+
+       _cond_extable .Licache_op\@, \fixup
        .endm
 
 /*
@@ -745,7 +769,7 @@ USER(\label, ic     ivau, \tmp2)                    // invalidate I line PoU
        cbz             \tmp, \lbl
 #endif
        adr_l           \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
-       this_cpu_offset \tmp2
+       get_this_cpu_offset     \tmp2
        ldr             w\tmp, [\tmp, \tmp2]
        cbnz            w\tmp, \lbl     // yield on pending softirq in task context
 .Lnoyield_\@:
index a074459..a9c0716 100644 (file)
@@ -47,7 +47,7 @@
  * cache before the transfer is done, causing old data to be seen by
  * the CPU.
  */
-#define ARCH_DMA_MINALIGN      (128)
+#define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
 
 #ifdef CONFIG_KASAN_SW_TAGS
 #define ARCH_SLAB_MINALIGN     (1ULL << KASAN_SHADOW_SCALE_SHIFT)
index 52e5c16..543c997 100644 (file)
  *     the implementation assumes non-aliasing VIPT D-cache and (aliasing)
  *     VIPT I-cache.
  *
- *     flush_icache_range(start, end)
+ *     All functions below apply to the interval [start, end)
+ *             - start  - virtual start address (inclusive)
+ *             - end    - virtual end address (exclusive)
  *
- *             Ensure coherency between the I-cache and the D-cache in the
- *             region described by start, end.
- *             - start  - virtual start address
- *             - end    - virtual end address
+ *     caches_clean_inval_pou(start, end)
  *
- *     invalidate_icache_range(start, end)
+ *             Ensure coherency between the I-cache and the D-cache region to
+ *             the Point of Unification.
  *
- *             Invalidate the I-cache in the region described by start, end.
- *             - start  - virtual start address
- *             - end    - virtual end address
+ *     caches_clean_inval_user_pou(start, end)
  *
- *     __flush_cache_user_range(start, end)
+ *             Ensure coherency between the I-cache and the D-cache region to
+ *             the Point of Unification.
+ *             Use only if the region might access user memory.
  *
- *             Ensure coherency between the I-cache and the D-cache in the
- *             region described by start, end.
- *             - start  - virtual start address
- *             - end    - virtual end address
+ *     icache_inval_pou(start, end)
  *
- *     __flush_dcache_area(kaddr, size)
+ *             Invalidate I-cache region to the Point of Unification.
  *
- *             Ensure that the data held in page is written back.
- *             - kaddr  - page address
- *             - size   - region size
+ *     dcache_clean_inval_poc(start, end)
+ *
+ *             Clean and invalidate D-cache region to the Point of Coherency.
+ *
+ *     dcache_inval_poc(start, end)
+ *
+ *             Invalidate D-cache region to the Point of Coherency.
+ *
+ *     dcache_clean_poc(start, end)
+ *
+ *             Clean D-cache region to the Point of Coherency.
+ *
+ *     dcache_clean_pop(start, end)
+ *
+ *             Clean D-cache region to the Point of Persistence.
+ *
+ *     dcache_clean_pou(start, end)
+ *
+ *             Clean D-cache region to the Point of Unification.
  */
-extern void __flush_icache_range(unsigned long start, unsigned long end);
-extern int  invalidate_icache_range(unsigned long start, unsigned long end);
-extern void __flush_dcache_area(void *addr, size_t len);
-extern void __inval_dcache_area(void *addr, size_t len);
-extern void __clean_dcache_area_poc(void *addr, size_t len);
-extern void __clean_dcache_area_pop(void *addr, size_t len);
-extern void __clean_dcache_area_pou(void *addr, size_t len);
-extern long __flush_cache_user_range(unsigned long start, unsigned long end);
-extern void sync_icache_aliases(void *kaddr, unsigned long len);
+extern void caches_clean_inval_pou(unsigned long start, unsigned long end);
+extern void icache_inval_pou(unsigned long start, unsigned long end);
+extern void dcache_clean_inval_poc(unsigned long start, unsigned long end);
+extern void dcache_inval_poc(unsigned long start, unsigned long end);
+extern void dcache_clean_poc(unsigned long start, unsigned long end);
+extern void dcache_clean_pop(unsigned long start, unsigned long end);
+extern void dcache_clean_pou(unsigned long start, unsigned long end);
+extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end);
+extern void sync_icache_aliases(unsigned long start, unsigned long end);
 
 static inline void flush_icache_range(unsigned long start, unsigned long end)
 {
-       __flush_icache_range(start, end);
+       caches_clean_inval_pou(start, end);
 
        /*
         * IPI all online CPUs so that they undergo a context synchronization
@@ -122,7 +135,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-static __always_inline void __flush_icache_all(void)
+static __always_inline void icache_inval_all_pou(void)
 {
        if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
                return;
index 7faae6f..0f6d16f 100644 (file)
 /*
  * Records attributes of an individual CPU.
  */
-struct cpuinfo_arm64 {
-       struct cpu      cpu;
-       struct kobject  kobj;
-       u32             reg_ctr;
-       u32             reg_cntfrq;
-       u32             reg_dczid;
-       u32             reg_midr;
-       u32             reg_revidr;
-
-       u64             reg_id_aa64dfr0;
-       u64             reg_id_aa64dfr1;
-       u64             reg_id_aa64isar0;
-       u64             reg_id_aa64isar1;
-       u64             reg_id_aa64mmfr0;
-       u64             reg_id_aa64mmfr1;
-       u64             reg_id_aa64mmfr2;
-       u64             reg_id_aa64pfr0;
-       u64             reg_id_aa64pfr1;
-       u64             reg_id_aa64zfr0;
-
+struct cpuinfo_32bit {
        u32             reg_id_dfr0;
        u32             reg_id_dfr1;
        u32             reg_id_isar0;
@@ -54,6 +35,30 @@ struct cpuinfo_arm64 {
        u32             reg_mvfr0;
        u32             reg_mvfr1;
        u32             reg_mvfr2;
+};
+
+struct cpuinfo_arm64 {
+       struct cpu      cpu;
+       struct kobject  kobj;
+       u64             reg_ctr;
+       u64             reg_cntfrq;
+       u64             reg_dczid;
+       u64             reg_midr;
+       u64             reg_revidr;
+       u64             reg_gmid;
+
+       u64             reg_id_aa64dfr0;
+       u64             reg_id_aa64dfr1;
+       u64             reg_id_aa64isar0;
+       u64             reg_id_aa64isar1;
+       u64             reg_id_aa64mmfr0;
+       u64             reg_id_aa64mmfr1;
+       u64             reg_id_aa64mmfr2;
+       u64             reg_id_aa64pfr0;
+       u64             reg_id_aa64pfr1;
+       u64             reg_id_aa64zfr0;
+
+       struct cpuinfo_32bit    aarch32;
 
        /* pseudo-ZCR for recording maximum ZCR_EL1 LEN value: */
        u64             reg_zcr;
index 338840c..9bb9d11 100644 (file)
@@ -619,6 +619,13 @@ static inline bool id_aa64pfr0_sve(u64 pfr0)
        return val > 0;
 }
 
+static inline bool id_aa64pfr1_mte(u64 pfr1)
+{
+       u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT);
+
+       return val >= ID_AA64PFR1_MTE;
+}
+
 void __init setup_cpu_features(void);
 void check_local_cpu_capabilities(void);
 
@@ -630,9 +637,15 @@ static inline bool cpu_supports_mixed_endian_el0(void)
        return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
 }
 
+const struct cpumask *system_32bit_el0_cpumask(void);
+DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
+
 static inline bool system_supports_32bit_el0(void)
 {
-       return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
+       u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+       return static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
+              id_aa64pfr0_32bit_el0(pfr0);
 }
 
 static inline bool system_supports_4kb_granule(void)
index 3c5ddb4..14a19d1 100644 (file)
@@ -18,4 +18,39 @@ static inline int arm_cpuidle_suspend(int index)
        return -EOPNOTSUPP;
 }
 #endif
+
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+#include <asm/arch_gicv3.h>
+
+struct arm_cpuidle_irq_context {
+       unsigned long pmr;
+       unsigned long daif_bits;
+};
+
+#define arm_cpuidle_save_irq_context(__c)                              \
+       do {                                                            \
+               struct arm_cpuidle_irq_context *c = __c;                \
+               if (system_uses_irq_prio_masking()) {                   \
+                       c->daif_bits = read_sysreg(daif);               \
+                       write_sysreg(c->daif_bits | PSR_I_BIT | PSR_F_BIT, \
+                                    daif);                             \
+                       c->pmr = gic_read_pmr();                        \
+                       gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); \
+               }                                                       \
+       } while (0)
+
+#define arm_cpuidle_restore_irq_context(__c)                           \
+       do {                                                            \
+               struct arm_cpuidle_irq_context *c = __c;                \
+               if (system_uses_irq_prio_masking()) {                   \
+                       gic_write_pmr(c->pmr);                          \
+                       write_sysreg(c->daif_bits, daif);               \
+               }                                                       \
+       } while (0)
+#else
+struct arm_cpuidle_irq_context { };
+
+#define arm_cpuidle_save_irq_context(c)                (void)c
+#define arm_cpuidle_restore_irq_context(c)     (void)c
+#endif
 #endif
index 3578aba..1bed37e 100644 (file)
@@ -137,7 +137,7 @@ void efi_virtmap_unload(void);
 
 static inline void efi_capsule_flush_cache_range(void *addr, int size)
 {
-       __flush_dcache_area(addr, size);
+       dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size);
 }
 
 #endif /* _ASM_EFI_H */
index 6546158..4afbc45 100644 (file)
@@ -31,20 +31,35 @@ static inline u32 disr_to_esr(u64 disr)
        return esr;
 }
 
-asmlinkage void el1_sync_handler(struct pt_regs *regs);
-asmlinkage void el0_sync_handler(struct pt_regs *regs);
-asmlinkage void el0_sync_compat_handler(struct pt_regs *regs);
+asmlinkage void handle_bad_stack(struct pt_regs *regs);
 
-asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs);
-asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs);
+asmlinkage void el1t_64_sync_handler(struct pt_regs *regs);
+asmlinkage void el1t_64_irq_handler(struct pt_regs *regs);
+asmlinkage void el1t_64_fiq_handler(struct pt_regs *regs);
+asmlinkage void el1t_64_error_handler(struct pt_regs *regs);
+
+asmlinkage void el1h_64_sync_handler(struct pt_regs *regs);
+asmlinkage void el1h_64_irq_handler(struct pt_regs *regs);
+asmlinkage void el1h_64_fiq_handler(struct pt_regs *regs);
+asmlinkage void el1h_64_error_handler(struct pt_regs *regs);
+
+asmlinkage void el0t_64_sync_handler(struct pt_regs *regs);
+asmlinkage void el0t_64_irq_handler(struct pt_regs *regs);
+asmlinkage void el0t_64_fiq_handler(struct pt_regs *regs);
+asmlinkage void el0t_64_error_handler(struct pt_regs *regs);
+
+asmlinkage void el0t_32_sync_handler(struct pt_regs *regs);
+asmlinkage void el0t_32_irq_handler(struct pt_regs *regs);
+asmlinkage void el0t_32_fiq_handler(struct pt_regs *regs);
+asmlinkage void el0t_32_error_handler(struct pt_regs *regs);
+
+asmlinkage void call_on_irq_stack(struct pt_regs *regs,
+                                 void (*func)(struct pt_regs *));
 asmlinkage void enter_from_user_mode(void);
 asmlinkage void exit_to_user_mode(void);
-void arm64_enter_nmi(struct pt_regs *regs);
-void arm64_exit_nmi(struct pt_regs *regs);
 void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
 void do_undefinstr(struct pt_regs *regs);
 void do_bti(struct pt_regs *regs);
-asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
 void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
                        struct pt_regs *regs);
 void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs);
@@ -57,4 +72,7 @@ void do_cp15instr(unsigned int esr, struct pt_regs *regs);
 void do_el0_svc(struct pt_regs *regs);
 void do_el0_svc_compat(struct pt_regs *regs);
 void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
+void do_serror(struct pt_regs *regs, unsigned int esr);
+
+void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far);
 #endif /* __ASM_EXCEPTION_H */
index 2599504..c072161 100644 (file)
@@ -69,7 +69,7 @@ static inline void *sve_pffr(struct thread_struct *thread)
 extern void sve_save_state(void *state, u32 *pfpsr);
 extern void sve_load_state(void const *state, u32 const *pfpsr,
                           unsigned long vq_minus_1);
-extern void sve_flush_live(void);
+extern void sve_flush_live(unsigned long vq_minus_1);
 extern void sve_load_from_fpsimd_state(struct user_fpsimd_state const *state,
                                       unsigned long vq_minus_1);
 extern unsigned int sve_get_vl(void);
index a256399..0592044 100644 (file)
        mov     v\nz\().16b, v\nz\().16b
 .endm
 
-.macro sve_flush
+.macro sve_flush_z
  _for n, 0, 31, _sve_flush_z   \n
+.endm
+.macro sve_flush_p_ffr
  _for n, 0, 15, _sve_pfalse    \n
                _sve_wrffr      0
 .endm
diff --git a/arch/arm64/include/asm/insn-def.h b/arch/arm64/include/asm/insn-def.h
new file mode 100644 (file)
index 0000000..2c075f6
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_INSN_DEF_H
+#define __ASM_INSN_DEF_H
+
+/* A64 instructions are always 32 bits. */
+#define        AARCH64_INSN_SIZE               4
+
+#endif /* __ASM_INSN_DEF_H */
index 4ebb9c0..6b776c8 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/build_bug.h>
 #include <linux/types.h>
 
-#include <asm/alternative.h>
+#include <asm/insn-def.h>
 
 #ifndef __ASSEMBLY__
 /*
@@ -30,6 +30,7 @@
  */
 enum aarch64_insn_encoding_class {
        AARCH64_INSN_CLS_UNKNOWN,       /* UNALLOCATED */
+       AARCH64_INSN_CLS_SVE,           /* SVE instructions */
        AARCH64_INSN_CLS_DP_IMM,        /* Data processing - immediate */
        AARCH64_INSN_CLS_DP_REG,        /* Data processing - register */
        AARCH64_INSN_CLS_DP_FPSIMD,     /* Data processing - SIMD and FP */
@@ -294,6 +295,12 @@ __AARCH64_INSN_FUNCS(adr,  0x9F000000, 0x10000000)
 __AARCH64_INSN_FUNCS(adrp,     0x9F000000, 0x90000000)
 __AARCH64_INSN_FUNCS(prfm,     0x3FC00000, 0x39800000)
 __AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
+__AARCH64_INSN_FUNCS(store_imm,        0x3FC00000, 0x39000000)
+__AARCH64_INSN_FUNCS(load_imm, 0x3FC00000, 0x39400000)
+__AARCH64_INSN_FUNCS(store_pre,        0x3FE00C00, 0x38000C00)
+__AARCH64_INSN_FUNCS(load_pre, 0x3FE00C00, 0x38400C00)
+__AARCH64_INSN_FUNCS(store_post,       0x3FE00C00, 0x38000400)
+__AARCH64_INSN_FUNCS(load_post,        0x3FE00C00, 0x38400400)
 __AARCH64_INSN_FUNCS(str_reg,  0x3FE0EC00, 0x38206800)
 __AARCH64_INSN_FUNCS(ldadd,    0x3F20FC00, 0x38200000)
 __AARCH64_INSN_FUNCS(ldr_reg,  0x3FE0EC00, 0x38606800)
@@ -302,6 +309,8 @@ __AARCH64_INSN_FUNCS(ldrsw_lit,     0xFF000000, 0x98000000)
 __AARCH64_INSN_FUNCS(exclusive,        0x3F800000, 0x08000000)
 __AARCH64_INSN_FUNCS(load_ex,  0x3F400000, 0x08400000)
 __AARCH64_INSN_FUNCS(store_ex, 0x3F400000, 0x08000000)
+__AARCH64_INSN_FUNCS(stp,      0x7FC00000, 0x29000000)
+__AARCH64_INSN_FUNCS(ldp,      0x7FC00000, 0x29400000)
 __AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000)
 __AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000)
 __AARCH64_INSN_FUNCS(stp_pre,  0x7FC00000, 0x29800000)
@@ -334,6 +343,7 @@ __AARCH64_INSN_FUNCS(rev64, 0x7FFFFC00, 0x5AC00C00)
 __AARCH64_INSN_FUNCS(and,      0x7F200000, 0x0A000000)
 __AARCH64_INSN_FUNCS(bic,      0x7F200000, 0x0A200000)
 __AARCH64_INSN_FUNCS(orr,      0x7F200000, 0x2A000000)
+__AARCH64_INSN_FUNCS(mov_reg,  0x7FE0FFE0, 0x2A0003E0)
 __AARCH64_INSN_FUNCS(orn,      0x7F200000, 0x2A200000)
 __AARCH64_INSN_FUNCS(eor,      0x7F200000, 0x4A000000)
 __AARCH64_INSN_FUNCS(eon,      0x7F200000, 0x4A200000)
@@ -368,6 +378,14 @@ __AARCH64_INSN_FUNCS(eret_auth,    0xFFFFFBFF, 0xD69F0BFF)
 __AARCH64_INSN_FUNCS(mrs,      0xFFF00000, 0xD5300000)
 __AARCH64_INSN_FUNCS(msr_imm,  0xFFF8F01F, 0xD500401F)
 __AARCH64_INSN_FUNCS(msr_reg,  0xFFF00000, 0xD5100000)
+__AARCH64_INSN_FUNCS(dmb,      0xFFFFF0FF, 0xD50330BF)
+__AARCH64_INSN_FUNCS(dsb_base, 0xFFFFF0FF, 0xD503309F)
+__AARCH64_INSN_FUNCS(dsb_nxs,  0xFFFFF3FF, 0xD503323F)
+__AARCH64_INSN_FUNCS(isb,      0xFFFFF0FF, 0xD50330DF)
+__AARCH64_INSN_FUNCS(sb,       0xFFFFFFFF, 0xD50330FF)
+__AARCH64_INSN_FUNCS(clrex,    0xFFFFF0FF, 0xD503305F)
+__AARCH64_INSN_FUNCS(ssbb,     0xFFFFFFFF, 0xD503309F)
+__AARCH64_INSN_FUNCS(pssbb,    0xFFFFFFFF, 0xD503349F)
 
 #undef __AARCH64_INSN_FUNCS
 
@@ -379,8 +397,47 @@ static inline bool aarch64_insn_is_adr_adrp(u32 insn)
        return aarch64_insn_is_adr(insn) || aarch64_insn_is_adrp(insn);
 }
 
-int aarch64_insn_read(void *addr, u32 *insnp);
-int aarch64_insn_write(void *addr, u32 insn);
+static inline bool aarch64_insn_is_dsb(u32 insn)
+{
+       return aarch64_insn_is_dsb_base(insn) || aarch64_insn_is_dsb_nxs(insn);
+}
+
+static inline bool aarch64_insn_is_barrier(u32 insn)
+{
+       return aarch64_insn_is_dmb(insn) || aarch64_insn_is_dsb(insn) ||
+              aarch64_insn_is_isb(insn) || aarch64_insn_is_sb(insn) ||
+              aarch64_insn_is_clrex(insn) || aarch64_insn_is_ssbb(insn) ||
+              aarch64_insn_is_pssbb(insn);
+}
+
+static inline bool aarch64_insn_is_store_single(u32 insn)
+{
+       return aarch64_insn_is_store_imm(insn) ||
+              aarch64_insn_is_store_pre(insn) ||
+              aarch64_insn_is_store_post(insn);
+}
+
+static inline bool aarch64_insn_is_store_pair(u32 insn)
+{
+       return aarch64_insn_is_stp(insn) ||
+              aarch64_insn_is_stp_pre(insn) ||
+              aarch64_insn_is_stp_post(insn);
+}
+
+static inline bool aarch64_insn_is_load_single(u32 insn)
+{
+       return aarch64_insn_is_load_imm(insn) ||
+              aarch64_insn_is_load_pre(insn) ||
+              aarch64_insn_is_load_post(insn);
+}
+
+static inline bool aarch64_insn_is_load_pair(u32 insn)
+{
+       return aarch64_insn_is_ldp(insn) ||
+              aarch64_insn_is_ldp_pre(insn) ||
+              aarch64_insn_is_ldp_post(insn);
+}
+
 enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
 bool aarch64_insn_uses_literal(u32 insn);
 bool aarch64_insn_is_branch(u32 insn);
@@ -487,9 +544,6 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
 s32 aarch64_get_branch_offset(u32 insn);
 u32 aarch64_set_branch_offset(u32 insn, s32 offset);
 
-int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
-int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
-
 s32 aarch64_insn_adrp_get_offset(u32 insn);
 u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset);
 
@@ -506,6 +560,7 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn);
 
 typedef bool (pstate_check_t)(unsigned long);
 extern pstate_check_t * const aarch32_opcode_cond_checks[16];
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_INSN_H */
index d44df9d..3512184 100644 (file)
@@ -18,9 +18,9 @@
  * 64K (section size = 512M).
  */
 #ifdef CONFIG_ARM64_4K_PAGES
-#define ARM64_SWAPPER_USES_SECTION_MAPS 1
+#define ARM64_KERNEL_USES_PMD_MAPS 1
 #else
-#define ARM64_SWAPPER_USES_SECTION_MAPS 0
+#define ARM64_KERNEL_USES_PMD_MAPS 0
 #endif
 
 /*
@@ -33,7 +33,7 @@
  * VA range, so pages required to map highest possible PA are reserved in all
  * cases.
  */
-#if ARM64_SWAPPER_USES_SECTION_MAPS
+#if ARM64_KERNEL_USES_PMD_MAPS
 #define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1)
 #define IDMAP_PGTABLE_LEVELS   (ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT) - 1)
 #else
@@ -90,9 +90,9 @@
 #define IDMAP_DIR_SIZE         (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
 
 /* Initial memory map size */
-#if ARM64_SWAPPER_USES_SECTION_MAPS
-#define SWAPPER_BLOCK_SHIFT    SECTION_SHIFT
-#define SWAPPER_BLOCK_SIZE     SECTION_SIZE
+#if ARM64_KERNEL_USES_PMD_MAPS
+#define SWAPPER_BLOCK_SHIFT    PMD_SHIFT
+#define SWAPPER_BLOCK_SIZE     PMD_SIZE
 #define SWAPPER_TABLE_SHIFT    PUD_SHIFT
 #else
 #define SWAPPER_BLOCK_SHIFT    PAGE_SHIFT
 #define SWAPPER_TABLE_SHIFT    PMD_SHIFT
 #endif
 
-/* The size of the initial kernel direct mapping */
-#define SWAPPER_INIT_MAP_SIZE  (_AC(1, UL) << SWAPPER_TABLE_SHIFT)
-
 /*
  * Initial memory map attributes.
  */
 #define SWAPPER_PTE_FLAGS      (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define SWAPPER_PMD_FLAGS      (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
-#if ARM64_SWAPPER_USES_SECTION_MAPS
+#if ARM64_KERNEL_USES_PMD_MAPS
 #define SWAPPER_MM_MMUFLAGS    (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
 #else
 #define SWAPPER_MM_MMUFLAGS    (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
 #if defined(CONFIG_ARM64_4K_PAGES)
 #define ARM64_MEMSTART_SHIFT           PUD_SHIFT
 #elif defined(CONFIG_ARM64_16K_PAGES)
-#define ARM64_MEMSTART_SHIFT           (PMD_SHIFT + 5)
+#define ARM64_MEMSTART_SHIFT           CONT_PMD_SHIFT
 #else
 #define ARM64_MEMSTART_SHIFT           PMD_SHIFT
 #endif
index 5e9b33c..9f0bf21 100644 (file)
@@ -8,6 +8,7 @@
 #define __ARM_KVM_ASM_H__
 
 #include <asm/hyp_image.h>
+#include <asm/insn.h>
 #include <asm/virt.h>
 
 #define ARM_EXIT_WITH_SERROR_BIT  31
index 25ed956..f4cbfa9 100644 (file)
@@ -180,7 +180,8 @@ static inline void *__kvm_vector_slot2addr(void *base,
 
 struct kvm;
 
-#define kvm_flush_dcache_to_poc(a,l)   __flush_dcache_area((a), (l))
+#define kvm_flush_dcache_to_poc(a,l)   \
+       dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
 
 static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 {
@@ -208,12 +209,12 @@ static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
 {
        if (icache_is_aliasing()) {
                /* any kind of VIPT cache */
-               __flush_icache_all();
+               icache_inval_all_pou();
        } else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
                /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
                void *va = page_address(pfn_to_page(pfn));
 
-               invalidate_icache_range((unsigned long)va,
+               icache_inval_pou((unsigned long)va,
                                        (unsigned long)va + size);
        }
 }
index ba89a9a..9906541 100644 (file)
                SYM_FUNC_START_ALIAS(__pi_##x); \
                SYM_FUNC_START_WEAK(x)
 
+#define SYM_FUNC_START_WEAK_ALIAS_PI(x)                \
+               SYM_FUNC_START_ALIAS(__pi_##x); \
+               SYM_START(x, SYM_L_WEAK, SYM_A_ALIGN)
+
 #define SYM_FUNC_END_PI(x)                     \
                SYM_FUNC_END(x);                \
                SYM_FUNC_END_ALIAS(__pi_##x)
 
+#define SYM_FUNC_END_ALIAS_PI(x)               \
+               SYM_FUNC_END_ALIAS(x);          \
+               SYM_FUNC_END_ALIAS(__pi_##x)
+
 #endif
index 87b90dc..7b36096 100644 (file)
 #define MT_NORMAL              0
 #define MT_NORMAL_TAGGED       1
 #define MT_NORMAL_NC           2
-#define MT_NORMAL_WT           3
-#define MT_DEVICE_nGnRnE       4
-#define MT_DEVICE_nGnRE                5
-#define MT_DEVICE_GRE          6
+#define MT_DEVICE_nGnRnE       3
+#define MT_DEVICE_nGnRE                4
 
 /*
  * Memory types for Stage-2 translation
index d3cef91..eeb2109 100644 (file)
@@ -177,9 +177,9 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
                return;
 
        if (mm == &init_mm)
-               ttbr = __pa_symbol(reserved_pg_dir);
+               ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
        else
-               ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
+               ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;
 
        WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
 }
index 8100456..a11ccad 100644 (file)
@@ -1,7 +1,20 @@
-#ifdef CONFIG_ARM64_MODULE_PLTS
 SECTIONS {
+#ifdef CONFIG_ARM64_MODULE_PLTS
        .plt 0 (NOLOAD) : { BYTE(0) }
        .init.plt 0 (NOLOAD) : { BYTE(0) }
        .text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) }
-}
 #endif
+
+#ifdef CONFIG_KASAN_SW_TAGS
+       /*
+        * Outlined checks go into comdat-deduplicated sections named .text.hot.
+        * Because they are in comdats they are not combined by the linker and
+        * we otherwise end up with multiple sections with the same .text.hot
+        * name in the .ko file. The kernel module loader warns if it sees
+        * multiple sections with the same name so we use this sections
+        * directive to force them into a single section and silence the
+        * warning.
+        */
+       .text.hot : { *(.text.hot) }
+#endif
+}
index ddd4d17..d952352 100644 (file)
@@ -48,43 +48,84 @@ static inline u8 mte_get_random_tag(void)
        return mte_get_ptr_tag(addr);
 }
 
+static inline u64 __stg_post(u64 p)
+{
+       asm volatile(__MTE_PREAMBLE "stg %0, [%0], #16"
+                    : "+r"(p)
+                    :
+                    : "memory");
+       return p;
+}
+
+static inline u64 __stzg_post(u64 p)
+{
+       asm volatile(__MTE_PREAMBLE "stzg %0, [%0], #16"
+                    : "+r"(p)
+                    :
+                    : "memory");
+       return p;
+}
+
+static inline void __dc_gva(u64 p)
+{
+       asm volatile(__MTE_PREAMBLE "dc gva, %0" : : "r"(p) : "memory");
+}
+
+static inline void __dc_gzva(u64 p)
+{
+       asm volatile(__MTE_PREAMBLE "dc gzva, %0" : : "r"(p) : "memory");
+}
+
 /*
  * Assign allocation tags for a region of memory based on the pointer tag.
  * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
- * size must be non-zero and MTE_GRANULE_SIZE aligned.
+ * size must be MTE_GRANULE_SIZE aligned.
  */
-static inline void mte_set_mem_tag_range(void *addr, size_t size,
-                                               u8 tag, bool init)
+static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
+                                        bool init)
 {
-       u64 curr, end;
+       u64 curr, mask, dczid_bs, end1, end2, end3;
 
-       if (!size)
-               return;
+       /* Read DC G(Z)VA block size from the system register. */
+       dczid_bs = 4ul << (read_cpuid(DCZID_EL0) & 0xf);
 
        curr = (u64)__tag_set(addr, tag);
-       end = curr + size;
+       mask = dczid_bs - 1;
+       /* STG/STZG up to the end of the first block. */
+       end1 = curr | mask;
+       end3 = curr + size;
+       /* DC GVA / GZVA in [end1, end2) */
+       end2 = end3 & ~mask;
 
        /*
-        * 'asm volatile' is required to prevent the compiler to move
-        * the statement outside of the loop.
+        * The following code uses STG on the first DC GVA block even if the
+        * start address is aligned - it appears to be faster than an alignment
+        * check + conditional branch. Also, if the range size is at least 2 DC
+        * GVA blocks, the first two loops can use post-condition to save one
+        * branch each.
         */
-       if (init) {
-               do {
-                       asm volatile(__MTE_PREAMBLE "stzg %0, [%0]"
-                                    :
-                                    : "r" (curr)
-                                    : "memory");
-                       curr += MTE_GRANULE_SIZE;
-               } while (curr != end);
-       } else {
-               do {
-                       asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
-                                    :
-                                    : "r" (curr)
-                                    : "memory");
-                       curr += MTE_GRANULE_SIZE;
-               } while (curr != end);
-       }
+#define SET_MEMTAG_RANGE(stg_post, dc_gva)             \
+       do {                                            \
+               if (size >= 2 * dczid_bs) {             \
+                       do {                            \
+                               curr = stg_post(curr);  \
+                       } while (curr < end1);          \
+                                                       \
+                       do {                            \
+                               dc_gva(curr);           \
+                               curr += dczid_bs;       \
+                       } while (curr < end2);          \
+               }                                       \
+                                                       \
+               while (curr < end3)                     \
+                       curr = stg_post(curr);          \
+       } while (0)
+
+       if (init)
+               SET_MEMTAG_RANGE(__stzg_post, __dc_gzva);
+       else
+               SET_MEMTAG_RANGE(__stg_post, __dc_gva);
+#undef SET_MEMTAG_RANGE
 }
 
 void mte_enable_kernel_sync(void);
index bc88a1c..67bf259 100644 (file)
@@ -37,6 +37,7 @@ void mte_free_tag_storage(char *storage);
 /* track which pages have valid allocation tags */
 #define PG_mte_tagged  PG_arch_2
 
+void mte_zero_clear_page_tags(void *addr);
 void mte_sync_tags(pte_t *ptep, pte_t pte);
 void mte_copy_page_tags(void *kto, const void *kfrom);
 void mte_thread_init_user(void);
@@ -53,6 +54,9 @@ int mte_ptrace_copy_tags(struct task_struct *child, long request,
 /* unused if !CONFIG_ARM64_MTE, silence the compiler */
 #define PG_mte_tagged  0
 
+static inline void mte_zero_clear_page_tags(void *addr)
+{
+}
 static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
 {
 }
index 012cffc..ed1b9dc 100644 (file)
@@ -13,6 +13,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/personality.h> /* for READ_IMPLIES_EXEC */
+#include <linux/types.h> /* for gfp_t */
 #include <asm/pgtable-types.h>
 
 struct page;
@@ -28,9 +29,12 @@ void copy_user_highpage(struct page *to, struct page *from,
 void copy_highpage(struct page *to, struct page *from);
 #define __HAVE_ARCH_COPY_HIGHPAGE
 
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
-       alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+                                               unsigned long vaddr);
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+
+void tag_clear_highpage(struct page *to);
+#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
 
 #define clear_user_page(page, vaddr, pg)       clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
diff --git a/arch/arm64/include/asm/patching.h b/arch/arm64/include/asm/patching.h
new file mode 100644 (file)
index 0000000..6bf5adc
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef        __ASM_PATCHING_H
+#define        __ASM_PATCHING_H
+
+#include <linux/types.h>
+
+int aarch64_insn_read(void *addr, u32 *insnp);
+int aarch64_insn_write(void *addr, u32 insn);
+
+int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
+int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
+
+#endif /* __ASM_PATCHING_H */
index 60731f6..4ef6f19 100644 (file)
 /* PMMIR_EL1.SLOTS mask */
 #define ARMV8_PMU_SLOTS_MASK   0xff
 
+#define ARMV8_PMU_BUS_SLOTS_SHIFT 8
+#define ARMV8_PMU_BUS_SLOTS_MASK 0xff
+#define ARMV8_PMU_BUS_WIDTH_SHIFT 16
+#define ARMV8_PMU_BUS_WIDTH_MASK 0xf
+
 #ifdef CONFIG_PERF_EVENTS
 struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
index b82575a..40085e5 100644 (file)
 #define PTRS_PER_PGD           (1 << (VA_BITS - PGDIR_SHIFT))
 
 /*
- * Section address mask and size definitions.
- */
-#define SECTION_SHIFT          PMD_SHIFT
-#define SECTION_SIZE           (_AC(1, UL) << SECTION_SHIFT)
-#define SECTION_MASK           (~(SECTION_SIZE-1))
-
-/*
  * Contiguous page definitions.
  */
 #define CONT_PTE_SHIFT         (CONFIG_ARM64_CONT_PTE_SHIFT + PAGE_SHIFT)
index 938092d..7032f04 100644 (file)
@@ -55,7 +55,6 @@ extern bool arm64_use_ng_mappings;
 #define PROT_DEVICE_nGnRnE     (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
 #define PROT_DEVICE_nGnRE      (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_NORMAL_NC         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
 #define PROT_NORMAL            (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
 #define PROT_NORMAL_TAGGED     (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_TAGGED))
 
index 0b10204..11e60d0 100644 (file)
@@ -511,13 +511,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 
 #define pmd_none(pmd)          (!pmd_val(pmd))
 
-#define pmd_bad(pmd)           (!(pmd_val(pmd) & PMD_TABLE_BIT))
-
 #define pmd_table(pmd)         ((pmd_val(pmd) & PMD_TYPE_MASK) == \
                                 PMD_TYPE_TABLE)
 #define pmd_sect(pmd)          ((pmd_val(pmd) & PMD_TYPE_MASK) == \
                                 PMD_TYPE_SECT)
 #define pmd_leaf(pmd)          pmd_sect(pmd)
+#define pmd_bad(pmd)           (!pmd_table(pmd))
 
 #define pmd_leaf_size(pmd)     (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
 #define pte_leaf_size(pte)     (pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
@@ -604,7 +603,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
        pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
 
 #define pud_none(pud)          (!pud_val(pud))
-#define pud_bad(pud)           (!(pud_val(pud) & PUD_TABLE_BIT))
+#define pud_bad(pud)           (!pud_table(pud))
 #define pud_present(pud)       pte_present(pud_pte(pud))
 #define pud_leaf(pud)          pud_sect(pud)
 #define pud_valid(pud)         pte_valid(pud_pte(pud))
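
pmd_bad() and pud_bad() are now phrased through the existing type helpers: a descriptor is bad unless its low two bits encode a next-level table (0b11), which also catches invalid descriptors that happen to have bit 1 set and which the old PMD_TABLE_BIT test accepted. A standalone bit-level sketch, not kernel code; descriptor values are hypothetical:

/*
 * Standalone sketch of the reworked pmd_bad()/pud_bad() test; type bit
 * encodings follow the VMSAv8-64 descriptor format, sample values are
 * hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define TYPE_MASK	0x3UL
#define TYPE_TABLE	0x3UL	/* bits[1:0] == 0b11: next-level table */
#define TYPE_SECT	0x1UL	/* bits[1:0] == 0b01: block (section) mapping */

static bool is_table(unsigned long desc) { return (desc & TYPE_MASK) == TYPE_TABLE; }
static bool is_bad(unsigned long desc)   { return !is_table(desc); }

int main(void)
{
	printf("table   -> bad=%d\n", is_bad(0x0000000040001003UL));	/* 0 */
	printf("block   -> bad=%d\n", is_bad(0x0000000040000711UL));	/* 1 */
	printf("invalid -> bad=%d\n", is_bad(0x0000000000000002UL));	/* 1; the old bit-1 test said 0 */
	return 0;
}
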
index d50416b..28a78b6 100644 (file)
@@ -31,10 +31,6 @@ struct ptrauth_keys_user {
        struct ptrauth_key apga;
 };
 
-struct ptrauth_keys_kernel {
-       struct ptrauth_key apia;
-};
-
 #define __ptrauth_key_install_nosync(k, v)                     \
 do {                                                           \
        struct ptrauth_key __pki_v = (v);                       \
@@ -42,6 +38,29 @@ do {                                                         \
        write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1);     \
 } while (0)
 
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+
+struct ptrauth_keys_kernel {
+       struct ptrauth_key apia;
+};
+
+static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys)
+{
+       if (system_supports_address_auth())
+               get_random_bytes(&keys->apia, sizeof(keys->apia));
+}
+
+static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kernel *keys)
+{
+       if (!system_supports_address_auth())
+               return;
+
+       __ptrauth_key_install_nosync(APIA, keys->apia);
+       isb();
+}
+
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
 static inline void ptrauth_keys_install_user(struct ptrauth_keys_user *keys)
 {
        if (system_supports_address_auth()) {
@@ -69,21 +88,6 @@ static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys)
        ptrauth_keys_install_user(keys);
 }
 
-static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys)
-{
-       if (system_supports_address_auth())
-               get_random_bytes(&keys->apia, sizeof(keys->apia));
-}
-
-static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kernel *keys)
-{
-       if (!system_supports_address_auth())
-               return;
-
-       __ptrauth_key_install_nosync(APIA, keys->apia);
-       isb();
-}
-
 extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
 
 extern int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
@@ -121,11 +125,6 @@ static __always_inline void ptrauth_enable(void)
 #define ptrauth_thread_switch_user(tsk)                                        \
        ptrauth_keys_install_user(&(tsk)->thread.keys_user)
 
-#define ptrauth_thread_init_kernel(tsk)                                        \
-       ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel)
-#define ptrauth_thread_switch_kernel(tsk)                              \
-       ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)
-
 #else /* CONFIG_ARM64_PTR_AUTH */
 #define ptrauth_enable()
 #define ptrauth_prctl_reset_keys(tsk, arg)     (-EINVAL)
@@ -134,11 +133,19 @@ static __always_inline void ptrauth_enable(void)
 #define ptrauth_strip_insn_pac(lr)     (lr)
 #define ptrauth_suspend_exit()
 #define ptrauth_thread_init_user()
-#define ptrauth_thread_init_kernel(tsk)
 #define ptrauth_thread_switch_user(tsk)
-#define ptrauth_thread_switch_kernel(tsk)
 #endif /* CONFIG_ARM64_PTR_AUTH */
 
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+#define ptrauth_thread_init_kernel(tsk)                                        \
+       ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel)
+#define ptrauth_thread_switch_kernel(tsk)                              \
+       ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)
+#else
+#define ptrauth_thread_init_kernel(tsk)
+#define ptrauth_thread_switch_kernel(tsk)
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
 #define PR_PAC_ENABLED_KEYS_MASK                                               \
        (PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
 
index 9df3fee..b6517fd 100644 (file)
@@ -148,8 +148,10 @@ struct thread_struct {
        struct debug_info       debug;          /* debugging */
 #ifdef CONFIG_ARM64_PTR_AUTH
        struct ptrauth_keys_user        keys_user;
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
        struct ptrauth_keys_kernel      keys_kernel;
 #endif
+#endif
 #ifdef CONFIG_ARM64_MTE
        u64                     gcr_user_excl;
 #endif
@@ -257,8 +259,6 @@ void set_task_sctlr_el1(u64 sctlr);
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
                                         struct task_struct *next);
 
-asmlinkage void arm64_preempt_schedule_irq(void);
-
 #define task_pt_regs(p) \
        ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
 
@@ -329,13 +329,13 @@ long get_tagged_addr_ctrl(struct task_struct *task);
  * of header definitions for the use of task_stack_page.
  */
 
-#define current_top_of_stack()                                                 \
-({                                                                             \
-       struct stack_info _info;                                                \
-       BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info));   \
-       _info.high;                                                             \
+#define current_top_of_stack()                                                         \
+({                                                                                     \
+       struct stack_info _info;                                                        \
+       BUG_ON(!on_accessible_stack(current, current_stack_pointer, 1, &_info));        \
+       _info.high;                                                                     \
 })
-#define on_thread_stack()      (on_task_stack(current, current_stack_pointer, NULL))
+#define on_thread_stack()      (on_task_stack(current, current_stack_pointer, 1, NULL))
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_PROCESSOR_H */
index eaa2cd9..8297bcc 100644 (file)
@@ -9,18 +9,18 @@
 #ifdef CONFIG_SHADOW_CALL_STACK
        scs_sp  .req    x18
 
-       .macro scs_load tsk, tmp
+       .macro scs_load tsk
        ldr     scs_sp, [\tsk, #TSK_TI_SCS_SP]
        .endm
 
-       .macro scs_save tsk, tmp
+       .macro scs_save tsk
        str     scs_sp, [\tsk, #TSK_TI_SCS_SP]
        .endm
 #else
-       .macro scs_load tsk, tmp
+       .macro scs_load tsk
        .endm
 
-       .macro scs_save tsk, tmp
+       .macro scs_save tsk
        .endm
 #endif /* CONFIG_SHADOW_CALL_STACK */
 
index 63e0b92..7bea1d7 100644 (file)
@@ -37,13 +37,17 @@ struct sdei_registered_event;
 asmlinkage unsigned long __sdei_handler(struct pt_regs *regs,
                                        struct sdei_registered_event *arg);
 
+unsigned long do_sdei_event(struct pt_regs *regs,
+                           struct sdei_registered_event *arg);
+
 unsigned long sdei_arch_get_entry_point(int conduit);
 #define sdei_arch_get_entry_point(x)   sdei_arch_get_entry_point(x)
 
 struct stack_info;
 
-bool _on_sdei_stack(unsigned long sp, struct stack_info *info);
-static inline bool on_sdei_stack(unsigned long sp,
+bool _on_sdei_stack(unsigned long sp, unsigned long size,
+                   struct stack_info *info);
+static inline bool on_sdei_stack(unsigned long sp, unsigned long size,
                                struct stack_info *info)
 {
        if (!IS_ENABLED(CONFIG_VMAP_STACK))
@@ -51,7 +55,7 @@ static inline bool on_sdei_stack(unsigned long sp,
        if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
                return false;
        if (in_nmi())
-               return _on_sdei_stack(sp, info);
+               return _on_sdei_stack(sp, size, info);
 
        return false;
 }
index 0e35775..fc55f5a 100644 (file)
@@ -73,12 +73,10 @@ asmlinkage void secondary_start_kernel(void);
 
 /*
  * Initial data for bringing up a secondary CPU.
- * @stack  - sp for the secondary CPU
  * @status - Result passed back from the secondary CPU to
  *           indicate failure.
  */
 struct secondary_data {
-       void *stack;
        struct task_struct *task;
        long status;
 };
index 4b33ca6..1801399 100644 (file)
@@ -69,14 +69,14 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
 
 DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
 
-static inline bool on_stack(unsigned long sp, unsigned long low,
-                               unsigned long high, enum stack_type type,
-                               struct stack_info *info)
+static inline bool on_stack(unsigned long sp, unsigned long size,
+                           unsigned long low, unsigned long high,
+                           enum stack_type type, struct stack_info *info)
 {
        if (!low)
                return false;
 
-       if (sp < low || sp >= high)
+       if (sp < low || sp + size < sp || sp + size > high)
                return false;
 
        if (info) {
@@ -87,38 +87,38 @@ static inline bool on_stack(unsigned long sp, unsigned long low,
        return true;
 }
 
-static inline bool on_irq_stack(unsigned long sp,
+static inline bool on_irq_stack(unsigned long sp, unsigned long size,
                                struct stack_info *info)
 {
        unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
        unsigned long high = low + IRQ_STACK_SIZE;
 
-       return on_stack(sp, low, high, STACK_TYPE_IRQ, info);
+       return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
 }
 
 static inline bool on_task_stack(const struct task_struct *tsk,
-                                unsigned long sp,
+                                unsigned long sp, unsigned long size,
                                 struct stack_info *info)
 {
        unsigned long low = (unsigned long)task_stack_page(tsk);
        unsigned long high = low + THREAD_SIZE;
 
-       return on_stack(sp, low, high, STACK_TYPE_TASK, info);
+       return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
 }
 
 #ifdef CONFIG_VMAP_STACK
 DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
 
-static inline bool on_overflow_stack(unsigned long sp,
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
                                struct stack_info *info)
 {
        unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
        unsigned long high = low + OVERFLOW_STACK_SIZE;
 
-       return on_stack(sp, low, high, STACK_TYPE_OVERFLOW, info);
+       return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
 }
 #else
-static inline bool on_overflow_stack(unsigned long sp,
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
                        struct stack_info *info) { return false; }
 #endif
 
@@ -128,21 +128,21 @@ static inline bool on_overflow_stack(unsigned long sp,
  * context.
  */
 static inline bool on_accessible_stack(const struct task_struct *tsk,
-                                      unsigned long sp,
+                                      unsigned long sp, unsigned long size,
                                       struct stack_info *info)
 {
        if (info)
                info->type = STACK_TYPE_UNKNOWN;
 
-       if (on_task_stack(tsk, sp, info))
+       if (on_task_stack(tsk, sp, size, info))
                return true;
        if (tsk != current || preemptible())
                return false;
-       if (on_irq_stack(sp, info))
+       if (on_irq_stack(sp, size, info))
                return true;
-       if (on_overflow_stack(sp, info))
+       if (on_overflow_stack(sp, size, info))
                return true;
-       if (on_sdei_stack(sp, info))
+       if (on_sdei_stack(sp, size, info))
                return true;
 
        return false;
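
The widened on_stack() signature validates a whole object rather than a single address: [sp, sp + size) must fit inside [low, high), and the sp + size < sp term rejects address wraparound. A standalone sketch of the same predicate with hypothetical stack bounds:

/*
 * Standalone sketch of the range check used by on_stack() above.
 * The stack bounds below are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

static bool object_on_stack(unsigned long sp, unsigned long size,
			    unsigned long low, unsigned long high)
{
	if (!low)
		return false;
	/* Reject out-of-range starts, wraparound, and objects spilling past high. */
	return !(sp < low || sp + size < sp || sp + size > high);
}

int main(void)
{
	unsigned long low = 0xffff800010000000UL;
	unsigned long high = low + 0x4000;	/* hypothetical 16K stack */

	printf("%d\n", object_on_stack(low + 0x100, 16, low, high));	/* 1: fits */
	printf("%d\n", object_on_stack(high - 8, 16, low, high));	/* 0: spills past high */
	printf("%d\n", object_on_stack(~0UL - 4, 16, low, high));	/* 0: sp + size wraps */
	return 0;
}
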
index 65d1570..9ea84bc 100644 (file)
 /* MAIR_ELx memory attributes (used by Linux) */
 #define MAIR_ATTR_DEVICE_nGnRnE                UL(0x00)
 #define MAIR_ATTR_DEVICE_nGnRE         UL(0x04)
-#define MAIR_ATTR_DEVICE_GRE           UL(0x0c)
 #define MAIR_ATTR_NORMAL_NC            UL(0x44)
-#define MAIR_ATTR_NORMAL_WT            UL(0xbb)
 #define MAIR_ATTR_NORMAL_TAGGED                UL(0xf0)
 #define MAIR_ATTR_NORMAL               UL(0xff)
 #define MAIR_ATTR_MASK                 UL(0xff)
index 61c97d3..c995d1f 100644 (file)
@@ -28,6 +28,10 @@ static void tlb_flush(struct mmu_gather *tlb);
  */
 static inline int tlb_get_level(struct mmu_gather *tlb)
 {
+       /* The TTL field is only valid for the leaf entry. */
+       if (tlb->freed_tables)
+               return 0;
+
        if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
                                   tlb->cleared_puds ||
                                   tlb->cleared_p4ds))
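
The fix keys off mmu_gather::freed_tables: once intermediate page-table pages have been freed, no single leaf level describes the flush, so the TTL hint must fall back to 0 (no level information). A standalone model of that decision, not kernel code; the struct merely mirrors the mmu_gather fields consulted:

/*
 * Standalone model of the level-hint logic above; not kernel code.
 */
#include <stdio.h>

struct gather_model {
	unsigned int freed_tables : 1;
	unsigned int cleared_ptes : 1;
	unsigned int cleared_pmds : 1;
	unsigned int cleared_puds : 1;
	unsigned int cleared_p4ds : 1;
};

static int level_hint(const struct gather_model *g)
{
	/* The TTL hint is only valid when exactly one leaf level was touched. */
	if (g->freed_tables)
		return 0;
	if (g->cleared_ptes &&
	    !(g->cleared_pmds || g->cleared_puds || g->cleared_p4ds))
		return 3;
	if (g->cleared_pmds &&
	    !(g->cleared_ptes || g->cleared_puds || g->cleared_p4ds))
		return 2;
	if (g->cleared_puds &&
	    !(g->cleared_ptes || g->cleared_pmds || g->cleared_p4ds))
		return 1;
	return 0;
}

int main(void)
{
	struct gather_model pte_only  = { .cleared_ptes = 1 };
	struct gather_model pte_freed = { .cleared_ptes = 1, .freed_tables = 1 };

	/* 3: pure PTE-level unmap; 0: the same unmap that also freed a PMD table. */
	printf("%d %d\n", level_hint(&pte_only), level_hint(&pte_freed));
	return 0;
}
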
index 6cc9773..cce3085 100644 (file)
@@ -14,15 +14,22 @@ CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_syscall.o         = -fstack-protector -fstack-protector-strong
 CFLAGS_syscall.o       += -fno-stack-protector
 
+# It's not safe to invoke KCOV when portions of the kernel environment aren't
+# available or are out-of-sync with HW state. Since `noinstr` doesn't always
+# inhibit KCOV instrumentation, disable it for the entire compilation unit.
+KCOV_INSTRUMENT_entry.o := n
+KCOV_INSTRUMENT_idle.o := n
+
 # Object file lists.
 obj-y                  := debug-monitors.o entry.o irq.o fpsimd.o              \
                           entry-common.o entry-fpsimd.o process.o ptrace.o     \
                           setup.o signal.o sys.o stacktrace.o time.o traps.o   \
-                          io.o vdso.o hyp-stub.o psci.o cpu_ops.o insn.o       \
+                          io.o vdso.o hyp-stub.o psci.o cpu_ops.o              \
                           return_address.o cpuinfo.o cpu_errata.o              \
                           cpufeature.o alternative.o cacheinfo.o               \
                           smp.o smp_spin_table.o topology.o smccc-call.o       \
-                          syscall.o proton-pack.o idreg-override.o
+                          syscall.o proton-pack.o idreg-override.o idle.o      \
+                          patching.o
 
 targets                        += efi-entry.o
 
index cada0b8..f385172 100644 (file)
@@ -239,6 +239,18 @@ done:
        }
 }
 
+static pgprot_t __acpi_get_writethrough_mem_attribute(void)
+{
+       /*
+        * Although UEFI specifies the use of Normal Write-through for
+        * EFI_MEMORY_WT, it is seldom used in practice and not implemented
+        * by most (all?) CPUs. Rather than allocate a MAIR just for this
+        * purpose, emit a warning and use Normal Non-cacheable instead.
+        */
+       pr_warn_once("No MAIR allocation for EFI_MEMORY_WT; treating as Normal Non-cacheable\n");
+       return __pgprot(PROT_NORMAL_NC);
+}
+
 pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
 {
        /*
@@ -246,7 +258,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
         * types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is
         * mapped to a corresponding MAIR attribute encoding.
         * The EFI memory attribute advises all possible capabilities
-        * of a memory region. We use the most efficient capability.
+        * of a memory region.
         */
 
        u64 attr;
@@ -254,10 +266,10 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
        attr = efi_mem_attributes(addr);
        if (attr & EFI_MEMORY_WB)
                return PAGE_KERNEL;
-       if (attr & EFI_MEMORY_WT)
-               return __pgprot(PROT_NORMAL_WT);
        if (attr & EFI_MEMORY_WC)
                return __pgprot(PROT_NORMAL_NC);
+       if (attr & EFI_MEMORY_WT)
+               return __acpi_get_writethrough_mem_attribute();
        return __pgprot(PROT_DEVICE_nGnRnE);
 }
 
@@ -340,10 +352,10 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
                default:
                        if (region->attribute & EFI_MEMORY_WB)
                                prot = PAGE_KERNEL;
-                       else if (region->attribute & EFI_MEMORY_WT)
-                               prot = __pgprot(PROT_NORMAL_WT);
                        else if (region->attribute & EFI_MEMORY_WC)
                                prot = __pgprot(PROT_NORMAL_NC);
+                       else if (region->attribute & EFI_MEMORY_WT)
+                               prot = __acpi_get_writethrough_mem_attribute();
                }
        }
        return __ioremap(phys, size, prot);
index c906d20..3fb79b7 100644 (file)
@@ -181,7 +181,7 @@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_modu
         */
        if (!is_module) {
                dsb(ish);
-               __flush_icache_all();
+               icache_inval_all_pou();
                isb();
 
                /* Ignore ARM64_CB bit from feature mask */
index 0cb34cc..0a73b76 100644 (file)
@@ -27,6 +27,7 @@
 int main(void)
 {
   DEFINE(TSK_ACTIVE_MM,                offsetof(struct task_struct, active_mm));
+  DEFINE(TSK_CPU,              offsetof(struct task_struct, cpu));
   BLANK();
   DEFINE(TSK_TI_FLAGS,         offsetof(struct task_struct, thread_info.flags));
   DEFINE(TSK_TI_PREEMPT,       offsetof(struct task_struct, thread_info.preempt_count));
@@ -46,6 +47,8 @@ int main(void)
   DEFINE(THREAD_SCTLR_USER,    offsetof(struct task_struct, thread.sctlr_user));
 #ifdef CONFIG_ARM64_PTR_AUTH
   DEFINE(THREAD_KEYS_USER,     offsetof(struct task_struct, thread.keys_user));
+#endif
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
   DEFINE(THREAD_KEYS_KERNEL,   offsetof(struct task_struct, thread.keys_kernel));
 #endif
 #ifdef CONFIG_ARM64_MTE
@@ -99,7 +102,6 @@ int main(void)
   DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT);
   DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
   BLANK();
-  DEFINE(CPU_BOOT_STACK,       offsetof(struct secondary_data, stack));
   DEFINE(CPU_BOOT_TASK,                offsetof(struct secondary_data, task));
   BLANK();
   DEFINE(FTR_OVR_VAL_OFFSET,   offsetof(struct arm64_ftr_override, val));
@@ -138,6 +140,15 @@ int main(void)
   DEFINE(ARM_SMCCC_RES_X2_OFFS,                offsetof(struct arm_smccc_res, a2));
   DEFINE(ARM_SMCCC_QUIRK_ID_OFFS,      offsetof(struct arm_smccc_quirk, id));
   DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS,   offsetof(struct arm_smccc_quirk, state));
+  DEFINE(ARM_SMCCC_1_2_REGS_X0_OFFS,   offsetof(struct arm_smccc_1_2_regs, a0));
+  DEFINE(ARM_SMCCC_1_2_REGS_X2_OFFS,   offsetof(struct arm_smccc_1_2_regs, a2));
+  DEFINE(ARM_SMCCC_1_2_REGS_X4_OFFS,   offsetof(struct arm_smccc_1_2_regs, a4));
+  DEFINE(ARM_SMCCC_1_2_REGS_X6_OFFS,   offsetof(struct arm_smccc_1_2_regs, a6));
+  DEFINE(ARM_SMCCC_1_2_REGS_X8_OFFS,   offsetof(struct arm_smccc_1_2_regs, a8));
+  DEFINE(ARM_SMCCC_1_2_REGS_X10_OFFS,  offsetof(struct arm_smccc_1_2_regs, a10));
+  DEFINE(ARM_SMCCC_1_2_REGS_X12_OFFS,  offsetof(struct arm_smccc_1_2_regs, a12));
+  DEFINE(ARM_SMCCC_1_2_REGS_X14_OFFS,  offsetof(struct arm_smccc_1_2_regs, a14));
+  DEFINE(ARM_SMCCC_1_2_REGS_X16_OFFS,  offsetof(struct arm_smccc_1_2_regs, a16));
   BLANK();
   DEFINE(HIBERN_PBE_ORIG,      offsetof(struct pbe, orig_address));
   DEFINE(HIBERN_PBE_ADDR,      offsetof(struct pbe, address));
@@ -153,7 +164,9 @@ int main(void)
 #endif
 #ifdef CONFIG_ARM64_PTR_AUTH
   DEFINE(PTRAUTH_USER_KEY_APIA,                offsetof(struct ptrauth_keys_user, apia));
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
   DEFINE(PTRAUTH_KERNEL_KEY_APIA,      offsetof(struct ptrauth_keys_kernel, apia));
+#endif
   BLANK();
 #endif
   return 0;
index efed283..125d5c9 100644 (file)
@@ -76,6 +76,7 @@
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
 #include <asm/fpsimd.h>
+#include <asm/insn.h>
 #include <asm/kvm_host.h>
 #include <asm/mmu_context.h>
 #include <asm/mte.h>
@@ -108,6 +109,24 @@ bool arm64_use_ng_mappings = false;
 EXPORT_SYMBOL(arm64_use_ng_mappings);
 
 /*
+ * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs
+ * support it?
+ */
+static bool __read_mostly allow_mismatched_32bit_el0;
+
+/*
+ * Static branch enabled only if allow_mismatched_32bit_el0 is set and we have
+ * seen at least one CPU capable of 32-bit EL0.
+ */
+DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
+
+/*
+ * Mask of CPUs supporting 32-bit EL0.
+ * Only valid if arm64_mismatched_32bit_el0 is enabled.
+ */
+static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;
+
+/*
  * Flag to indicate if we have computed the system wide
  * capabilities based on the boot time active CPUs. This
  * will be used to determine if a new booting CPU should
@@ -400,6 +419,11 @@ static const struct arm64_ftr_bits ftr_dczid[] = {
        ARM64_FTR_END,
 };
 
+static const struct arm64_ftr_bits ftr_gmid[] = {
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, SYS_GMID_EL1_BS_SHIFT, 4, 0),
+       ARM64_FTR_END,
+};
+
 static const struct arm64_ftr_bits ftr_id_isar0[] = {
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DIVIDE_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DEBUG_SHIFT, 4, 0),
@@ -617,6 +641,9 @@ static const struct __ftr_reg_entry {
        /* Op1 = 0, CRn = 1, CRm = 2 */
        ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
 
+       /* Op1 = 1, CRn = 0, CRm = 0 */
+       ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
+
        /* Op1 = 3, CRn = 0, CRm = 0 */
        { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
        ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
@@ -767,7 +794,7 @@ static void __init sort_ftr_regs(void)
  * Any bits that are not covered by an arm64_ftr_bits entry are considered
  * RES0 for the system-wide value, and must strictly match.
  */
-static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
+static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
 {
        u64 val = 0;
        u64 strict_mask = ~0x0ULL;
@@ -863,6 +890,31 @@ static void __init init_cpu_hwcaps_indirect_list(void)
 
 static void __init setup_boot_cpu_capabilities(void);
 
+static void init_32bit_cpu_features(struct cpuinfo_32bit *info)
+{
+       init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
+       init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
+       init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
+       init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
+       init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
+       init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
+       init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
+       init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
+       init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
+       init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
+       init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
+       init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
+       init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
+       init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
+       init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
+       init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
+       init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
+       init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
+       init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
+       init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
+       init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
+}
+
 void __init init_cpu_features(struct cpuinfo_arm64 *info)
 {
        /* Before we start using the tables, make sure it is sorted */
@@ -882,35 +934,17 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
        init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
        init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
 
-       if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
-               init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
-               init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
-               init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
-               init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
-               init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
-               init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
-               init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
-               init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
-               init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
-               init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
-               init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
-               init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
-               init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
-               init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
-               init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
-               init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
-               init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
-               init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
-               init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
-               init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
-               init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
-       }
+       if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
+               init_32bit_cpu_features(&info->aarch32);
 
        if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
                init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
                sve_init_vq_map();
        }
 
+       if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
+               init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
+
        /*
         * Initialize the indirect array of CPU hwcaps capabilities pointers
         * before we handle the boot CPU below.
@@ -975,21 +1009,29 @@ static void relax_cpu_ftr_reg(u32 sys_id, int field)
        WARN_ON(!ftrp->width);
 }
 
-static int update_32bit_cpu_features(int cpu, struct cpuinfo_arm64 *info,
-                                    struct cpuinfo_arm64 *boot)
+static void lazy_init_32bit_cpu_features(struct cpuinfo_arm64 *info,
+                                        struct cpuinfo_arm64 *boot)
+{
+       static bool boot_cpu_32bit_regs_overridden = false;
+
+       if (!allow_mismatched_32bit_el0 || boot_cpu_32bit_regs_overridden)
+               return;
+
+       if (id_aa64pfr0_32bit_el0(boot->reg_id_aa64pfr0))
+               return;
+
+       boot->aarch32 = info->aarch32;
+       init_32bit_cpu_features(&boot->aarch32);
+       boot_cpu_32bit_regs_overridden = true;
+}
+
+static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info,
+                                    struct cpuinfo_32bit *boot)
 {
        int taint = 0;
        u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 
        /*
-        * If we don't have AArch32 at all then skip the checks entirely
-        * as the register values may be UNKNOWN and we're not going to be
-        * using them for anything.
-        */
-       if (!id_aa64pfr0_32bit_el0(pfr0))
-               return taint;
-
-       /*
         * If we don't have AArch32 at EL1, then relax the strictness of
         * EL1-dependent register fields to avoid spurious sanity check fails.
         */
@@ -1135,10 +1177,29 @@ void update_cpu_features(int cpu,
        }
 
        /*
+        * The kernel uses the LDGM/STGM instructions and the number of tags
+        * they read/write depends on the GMID_EL1.BS field. Check that the
+        * value is the same on all CPUs.
+        */
+       if (IS_ENABLED(CONFIG_ARM64_MTE) &&
+           id_aa64pfr1_mte(info->reg_id_aa64pfr1)) {
+               taint |= check_update_ftr_reg(SYS_GMID_EL1, cpu,
+                                             info->reg_gmid, boot->reg_gmid);
+       }
+
+       /*
+        * If we don't have AArch32 at all then skip the checks entirely
+        * as the register values may be UNKNOWN and we're not going to be
+        * using them for anything.
+        *
         * This relies on a sanitised view of the AArch64 ID registers
         * (e.g. SYS_ID_AA64PFR0_EL1), so we call it last.
         */
-       taint |= update_32bit_cpu_features(cpu, info, boot);
+       if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+               lazy_init_32bit_cpu_features(info, boot);
+               taint |= update_32bit_cpu_features(cpu, &info->aarch32,
+                                                  &boot->aarch32);
+       }
 
        /*
         * Mismatched CPU features are a recipe for disaster. Don't even
@@ -1248,6 +1309,28 @@ has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
        return feature_matches(val, entry);
 }
 
+const struct cpumask *system_32bit_el0_cpumask(void)
+{
+       if (!system_supports_32bit_el0())
+               return cpu_none_mask;
+
+       if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+               return cpu_32bit_el0_mask;
+
+       return cpu_possible_mask;
+}
+
+static bool has_32bit_el0(const struct arm64_cpu_capabilities *entry, int scope)
+{
+       if (!has_cpuid_feature(entry, scope))
+               return allow_mismatched_32bit_el0;
+
+       if (scope == SCOPE_SYSTEM)
+               pr_info("detected: 32-bit EL0 Support\n");
+
+       return true;
+}
+
 static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
 {
        bool has_sre;
@@ -1866,10 +1949,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .cpu_enable = cpu_copy_el2regs,
        },
        {
-               .desc = "32-bit EL0 Support",
-               .capability = ARM64_HAS_32BIT_EL0,
+               .capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE,
                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
-               .matches = has_cpuid_feature,
+               .matches = has_32bit_el0,
                .sys_reg = SYS_ID_AA64PFR0_EL1,
                .sign = FTR_UNSIGNED,
                .field_pos = ID_AA64PFR0_EL0_SHIFT,
@@ -2378,7 +2460,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
        {},
 };
 
-static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
+static void cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
 {
        switch (cap->hwcap_type) {
        case CAP_HWCAP:
@@ -2423,7 +2505,7 @@ static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
        return rc;
 }
 
-static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
+static void setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
 {
        /* We support emulation of accesses to CPU ID feature registers */
        cpu_set_named_feature(CPUID);
@@ -2598,7 +2680,7 @@ static void check_early_cpu_features(void)
 }
 
 static void
-verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
+__verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
 {
 
        for (; caps->matches; caps++)
@@ -2609,6 +2691,14 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
                }
 }
 
+static void verify_local_elf_hwcaps(void)
+{
+       __verify_local_elf_hwcaps(arm64_elf_hwcaps);
+
+       if (id_aa64pfr0_32bit_el0(read_cpuid(ID_AA64PFR0_EL1)))
+               __verify_local_elf_hwcaps(compat_elf_hwcaps);
+}
+
 static void verify_sve_features(void)
 {
        u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
@@ -2673,11 +2763,7 @@ static void verify_local_cpu_capabilities(void)
         * on all secondary CPUs.
         */
        verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU);
-
-       verify_local_elf_hwcaps(arm64_elf_hwcaps);
-
-       if (system_supports_32bit_el0())
-               verify_local_elf_hwcaps(compat_elf_hwcaps);
+       verify_local_elf_hwcaps();
 
        if (system_supports_sve())
                verify_sve_features();
@@ -2812,6 +2898,34 @@ void __init setup_cpu_features(void)
                        ARCH_DMA_MINALIGN);
 }
 
+static int enable_mismatched_32bit_el0(unsigned int cpu)
+{
+       struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
+       bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0);
+
+       if (cpu_32bit) {
+               cpumask_set_cpu(cpu, cpu_32bit_el0_mask);
+               static_branch_enable_cpuslocked(&arm64_mismatched_32bit_el0);
+               setup_elf_hwcaps(compat_elf_hwcaps);
+       }
+
+       return 0;
+}
+
+static int __init init_32bit_el0_mask(void)
+{
+       if (!allow_mismatched_32bit_el0)
+               return 0;
+
+       if (!zalloc_cpumask_var(&cpu_32bit_el0_mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+                                "arm64/mismatched_32bit_el0:online",
+                                enable_mismatched_32bit_el0, NULL);
+}
+subsys_initcall_sync(init_32bit_el0_mask);
+
 static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
 {
        cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
@@ -2905,8 +3019,8 @@ static int emulate_mrs(struct pt_regs *regs, u32 insn)
 }
 
 static struct undef_hook mrs_hook = {
-       .instr_mask = 0xfff00000,
-       .instr_val  = 0xd5300000,
+       .instr_mask = 0xffff0000,
+       .instr_val  = 0xd5380000,
        .pstate_mask = PSR_AA32_MODE_MASK,
        .pstate_val = PSR_MODE_EL0t,
        .fn = emulate_mrs,
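
The mrs_hook tightening extends the match from bits [31:20] to bits [31:16]: the new mask/value pair only accepts MRS encodings with op0 == 3 and op1 == 0, the space the emulated CPU ID feature registers live in, instead of any MRS (op0 2 or 3, any op1). A standalone check of the old and new filters against two hand-assembled sample encodings:

/*
 * Standalone sketch of what the tightened undef hook accepts. The sample
 * encodings are "mrs x0, id_aa64pfr0_el1" (op0=3, op1=0, an ID register)
 * and "mrs x0, ctr_el0" (op0=3, op1=3, outside the op1=0 ID space).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool hook_matches(uint32_t insn, uint32_t mask, uint32_t val)
{
	return (insn & mask) == val;
}

int main(void)
{
	const uint32_t old_mask = 0xfff00000, old_val = 0xd5300000;
	const uint32_t new_mask = 0xffff0000, new_val = 0xd5380000;
	const uint32_t mrs_id_aa64pfr0 = 0xd5380400;
	const uint32_t mrs_ctr_el0     = 0xd53b0020;

	printf("old: %d %d\n",
	       hook_matches(mrs_id_aa64pfr0, old_mask, old_val),
	       hook_matches(mrs_ctr_el0, old_mask, old_val));	/* 1 1 */
	printf("new: %d %d\n",
	       hook_matches(mrs_id_aa64pfr0, new_mask, new_val),
	       hook_matches(mrs_ctr_el0, new_mask, new_val));	/* 1 0 */
	return 0;
}
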
index 51fcf99..87731fe 100644 (file)
@@ -246,7 +246,7 @@ static struct kobj_type cpuregs_kobj_type = {
                struct cpuinfo_arm64 *info = kobj_to_cpuinfo(kobj);             \
                                                                                \
                if (info->reg_midr)                                             \
-                       return sprintf(buf, "0x%016x\n", info->reg_##_field);   \
+                       return sprintf(buf, "0x%016llx\n", info->reg_##_field); \
                else                                                            \
                        return 0;                                               \
        }                                                                       \
@@ -344,6 +344,32 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
        pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
 }
 
+static void __cpuinfo_store_cpu_32bit(struct cpuinfo_32bit *info)
+{
+       info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
+       info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1);
+       info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
+       info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
+       info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
+       info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
+       info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
+       info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+       info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
+       info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
+       info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
+       info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
+       info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
+       info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1);
+       info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1);
+       info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
+       info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+       info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1);
+
+       info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+       info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+       info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+}
+
 static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 {
        info->reg_cntfrq = arch_timer_get_cntfrq();
@@ -371,31 +397,11 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
        info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
        info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);
 
-       /* Update the 32bit ID registers only if AArch32 is implemented */
-       if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
-               info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
-               info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1);
-               info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
-               info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
-               info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
-               info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
-               info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
-               info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
-               info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
-               info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
-               info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
-               info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
-               info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
-               info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1);
-               info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1);
-               info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
-               info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
-               info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1);
-
-               info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
-               info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
-               info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
-       }
+       if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
+               info->reg_gmid = read_cpuid(GMID_EL1);
+
+       if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
+               __cpuinfo_store_cpu_32bit(&info->aarch32);
 
        if (IS_ENABLED(CONFIG_ARM64_SVE) &&
            id_aa64pfr0_sve(info->reg_id_aa64pfr0))
index 0073b24..61a87fa 100644 (file)
@@ -28,7 +28,8 @@ SYM_CODE_START(efi_enter_kernel)
         * stale icache entries from before relocation.
         */
        ldr     w1, =kernel_size
-       bl      __clean_dcache_area_poc
+       add     x1, x0, x1
+       bl      dcache_clean_poc
        ic      ialluis
 
        /*
@@ -36,8 +37,8 @@ SYM_CODE_START(efi_enter_kernel)
         * so that we can safely disable the MMU and caches.
         */
        adr     x0, 0f
-       ldr     w1, 3f
-       bl      __clean_dcache_area_poc
+       adr     x1, 3f
+       bl      dcache_clean_poc
 0:
        /* Turn off Dcache and MMU */
        mrs     x0, CurrentEL
@@ -64,5 +65,5 @@ SYM_CODE_START(efi_enter_kernel)
        mov     x2, xzr
        mov     x3, xzr
        br      x19
+3:
 SYM_CODE_END(efi_enter_kernel)
-3:     .long   . - 0b
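
Both call sites now follow the reworked cache maintenance API: dcache_clean_poc takes a start and an exclusive end address where __clean_dcache_area_poc took a base and a size, hence the add x1, x0, x1 and the 3: label becoming a plain end marker instead of a stored size word. A minimal sketch of the conversion a (base, size) caller would make, assuming the C prototype matches the (start, end) convention visible in the asm above; the wrapper name is made up for the example:

/*
 * Minimal sketch, assuming dcache_clean_poc() is declared with the
 * (start, exclusive end) convention the asm call sites above use.
 */
extern void dcache_clean_poc(unsigned long start, unsigned long end);

static inline void clean_dcache_by_size(void *base, unsigned long size)
{
	unsigned long start = (unsigned long)base;

	dcache_clean_poc(start, start + size);
}
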
index 340d04e..12ce14a 100644 (file)
@@ -6,7 +6,11 @@
  */
 
 #include <linux/context_tracking.h>
+#include <linux/linkage.h>
+#include <linux/lockdep.h>
 #include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
 #include <linux/thread_info.h>
 
 #include <asm/cpufeature.h>
 #include <asm/exception.h>
 #include <asm/kprobes.h>
 #include <asm/mmu.h>
+#include <asm/processor.h>
+#include <asm/sdei.h>
+#include <asm/stacktrace.h>
 #include <asm/sysreg.h>
+#include <asm/system_misc.h>
 
 /*
  * This is intended to match the logic in irqentry_enter(), handling the kernel
@@ -67,7 +75,7 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
        }
 }
 
-void noinstr arm64_enter_nmi(struct pt_regs *regs)
+static void noinstr arm64_enter_nmi(struct pt_regs *regs)
 {
        regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
 
@@ -80,7 +88,7 @@ void noinstr arm64_enter_nmi(struct pt_regs *regs)
        ftrace_nmi_enter();
 }
 
-void noinstr arm64_exit_nmi(struct pt_regs *regs)
+static void noinstr arm64_exit_nmi(struct pt_regs *regs)
 {
        bool restore = regs->lockdep_hardirqs;
 
@@ -97,7 +105,7 @@ void noinstr arm64_exit_nmi(struct pt_regs *regs)
        __nmi_exit();
 }
 
-asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
+static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
 {
        if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
                arm64_enter_nmi(regs);
@@ -105,7 +113,7 @@ asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
                enter_from_kernel_mode(regs);
 }
 
-asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
+static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
 {
        if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
                arm64_exit_nmi(regs);
@@ -113,6 +121,65 @@ asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
                exit_to_kernel_mode(regs);
 }
 
+static void __sched arm64_preempt_schedule_irq(void)
+{
+       lockdep_assert_irqs_disabled();
+
+       /*
+        * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
+        * priority masking is used the GIC irqchip driver will clear DAIF.IF
+        * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
+        * DAIF we must have handled an NMI, so skip preemption.
+        */
+       if (system_uses_irq_prio_masking() && read_sysreg(daif))
+               return;
+
+       /*
+        * Preempting a task from an IRQ means we leave copies of PSTATE
+        * on the stack. cpufeature's enable calls may modify PSTATE, but
+        * resuming one of these preempted tasks would undo those changes.
+        *
+        * Only allow a task to be preempted once cpufeatures have been
+        * enabled.
+        */
+       if (system_capabilities_finalized())
+               preempt_schedule_irq();
+}
+
+static void do_interrupt_handler(struct pt_regs *regs,
+                                void (*handler)(struct pt_regs *))
+{
+       if (on_thread_stack())
+               call_on_irq_stack(regs, handler);
+       else
+               handler(regs);
+}
+
+extern void (*handle_arch_irq)(struct pt_regs *);
+extern void (*handle_arch_fiq)(struct pt_regs *);
+
+static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
+                                     unsigned int esr)
+{
+       arm64_enter_nmi(regs);
+
+       console_verbose();
+
+       pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n",
+               vector, smp_processor_id(), esr,
+               esr_get_class_string(esr));
+
+       __show_regs(regs);
+       panic("Unhandled exception");
+}
+
+#define UNHANDLED(el, regsize, vector)                                                 \
+asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)      \
+{                                                                                      \
+       const char *desc = #regsize "-bit " #el " " #vector;                            \
+       __panic_unhandled(regs, desc, read_sysreg(esr_el1));                            \
+}
+
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
 
@@ -162,6 +229,11 @@ static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
 }
 #endif /* CONFIG_ARM64_ERRATUM_1463225 */
 
+UNHANDLED(el1t, 64, sync)
+UNHANDLED(el1t, 64, irq)
+UNHANDLED(el1t, 64, fiq)
+UNHANDLED(el1t, 64, error)
+
 static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
 {
        unsigned long far = read_sysreg(far_el1);
@@ -193,15 +265,6 @@ static void noinstr el1_undef(struct pt_regs *regs)
        exit_to_kernel_mode(regs);
 }
 
-static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
-{
-       enter_from_kernel_mode(regs);
-       local_daif_inherit(regs);
-       bad_mode(regs, 0, esr);
-       local_daif_mask();
-       exit_to_kernel_mode(regs);
-}
-
 static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
 {
        regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
@@ -245,7 +308,7 @@ static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
        exit_to_kernel_mode(regs);
 }
 
-asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
+asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
 {
        unsigned long esr = read_sysreg(esr_el1);
 
@@ -275,10 +338,50 @@ asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
                el1_fpac(regs, esr);
                break;
        default:
-               el1_inv(regs, esr);
+               __panic_unhandled(regs, "64-bit el1h sync", esr);
        }
 }
 
+static void noinstr el1_interrupt(struct pt_regs *regs,
+                                 void (*handler)(struct pt_regs *))
+{
+       write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
+
+       enter_el1_irq_or_nmi(regs);
+       do_interrupt_handler(regs, handler);
+
+       /*
+        * Note: thread_info::preempt_count includes both thread_info::count
+        * and thread_info::need_resched, and is not equivalent to
+        * preempt_count().
+        */
+       if (IS_ENABLED(CONFIG_PREEMPTION) &&
+           READ_ONCE(current_thread_info()->preempt_count) == 0)
+               arm64_preempt_schedule_irq();
+
+       exit_el1_irq_or_nmi(regs);
+}
+
+asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
+{
+       el1_interrupt(regs, handle_arch_irq);
+}
+
+asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
+{
+       el1_interrupt(regs, handle_arch_fiq);
+}
+
+asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
+{
+       unsigned long esr = read_sysreg(esr_el1);
+
+       local_daif_restore(DAIF_ERRCTX);
+       arm64_enter_nmi(regs);
+       do_serror(regs, esr);
+       arm64_exit_nmi(regs);
+}
+
 asmlinkage void noinstr enter_from_user_mode(void)
 {
        lockdep_hardirqs_off(CALLER_ADDR0);
@@ -398,7 +501,7 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
 
        enter_from_user_mode();
        do_debug_exception(far, esr, regs);
-       local_daif_restore(DAIF_PROCCTX_NOIRQ);
+       local_daif_restore(DAIF_PROCCTX);
 }
 
 static void noinstr el0_svc(struct pt_regs *regs)
@@ -415,7 +518,7 @@ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
        do_ptrauth_fault(regs, esr);
 }
 
-asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
 {
        unsigned long esr = read_sysreg(esr_el1);
 
@@ -468,6 +571,56 @@ asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
        }
 }
 
+static void noinstr el0_interrupt(struct pt_regs *regs,
+                                 void (*handler)(struct pt_regs *))
+{
+       enter_from_user_mode();
+
+       write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
+
+       if (regs->pc & BIT(55))
+               arm64_apply_bp_hardening();
+
+       do_interrupt_handler(regs, handler);
+}
+
+static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
+{
+       el0_interrupt(regs, handle_arch_irq);
+}
+
+asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
+{
+       __el0_irq_handler_common(regs);
+}
+
+static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
+{
+       el0_interrupt(regs, handle_arch_fiq);
+}
+
+asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
+{
+       __el0_fiq_handler_common(regs);
+}
+
+static void __el0_error_handler_common(struct pt_regs *regs)
+{
+       unsigned long esr = read_sysreg(esr_el1);
+
+       enter_from_user_mode();
+       local_daif_restore(DAIF_ERRCTX);
+       arm64_enter_nmi(regs);
+       do_serror(regs, esr);
+       arm64_exit_nmi(regs);
+       local_daif_restore(DAIF_PROCCTX);
+}
+
+asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
+{
+       __el0_error_handler_common(regs);
+}
+
 #ifdef CONFIG_COMPAT
 static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
 {
@@ -483,7 +636,7 @@ static void noinstr el0_svc_compat(struct pt_regs *regs)
        do_el0_svc_compat(regs);
 }
 
-asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
 {
        unsigned long esr = read_sysreg(esr_el1);
 
@@ -526,4 +679,71 @@ asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
                el0_inv(regs, esr);
        }
 }
+
+asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
+{
+       __el0_irq_handler_common(regs);
+}
+
+asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
+{
+       __el0_fiq_handler_common(regs);
+}
+
+asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
+{
+       __el0_error_handler_common(regs);
+}
+#else /* CONFIG_COMPAT */
+UNHANDLED(el0t, 32, sync)
+UNHANDLED(el0t, 32, irq)
+UNHANDLED(el0t, 32, fiq)
+UNHANDLED(el0t, 32, error)
 #endif /* CONFIG_COMPAT */
+
+#ifdef CONFIG_VMAP_STACK
+asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
+{
+       unsigned int esr = read_sysreg(esr_el1);
+       unsigned long far = read_sysreg(far_el1);
+
+       arm64_enter_nmi(regs);
+       panic_bad_stack(regs, esr, far);
+}
+#endif /* CONFIG_VMAP_STACK */
+
+#ifdef CONFIG_ARM_SDE_INTERFACE
+asmlinkage noinstr unsigned long
+__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
+{
+       unsigned long ret;
+
+       /*
+        * We didn't take an exception to get here, so the HW hasn't
+        * set/cleared bits in PSTATE that we may rely on.
+        *
+        * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
+        * whether PSTATE bits are inherited unchanged or generated from
+        * scratch, and the TF-A implementation always clears PAN and always
+        * clears UAO. There are no other known implementations.
+        *
+        * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
+        * PSTATE is modified upon architectural exceptions, and so PAN is
+        * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
+        * cleared.
+        *
+        * We must explicitly reset PAN to the expected state, including
+        * clearing it when the host isn't using it, in case a VM had it set.
+        */
+       if (system_uses_hw_pan())
+               set_pstate_pan(1);
+       else if (cpu_has_pan())
+               set_pstate_pan(0);
+
+       arm64_enter_nmi(regs);
+       ret = do_sdei_event(regs, arg);
+       arm64_exit_nmi(regs);
+
+       return ret;
+}
+#endif /* CONFIG_ARM_SDE_INTERFACE */
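
For reference, the UNHANDLED() macro earlier in this file stamps out one panicking handler per reserved vector; expanding UNHANDLED(el1t, 64, sync) by hand gives the following, with the adjacent string literals concatenating to "64-bit el1t sync":

/* Hand expansion of UNHANDLED(el1t, 64, sync) from the macro above. */
asmlinkage void noinstr el1t_64_sync_handler(struct pt_regs *regs)
{
	const char *desc = "64" "-bit " "el1t" " " "sync";	/* "64-bit el1t sync" */
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));
}

The same el<el><ht>_<regsize>_<vector> naming scheme is what the reworked kernel_ventry arguments in entry.S build their branch targets from.
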
index 3ecec60..0a7a647 100644 (file)
@@ -63,16 +63,24 @@ SYM_FUNC_END(sve_set_vq)
  * and the rest zeroed. All the other SVE registers will be zeroed.
  */
 SYM_FUNC_START(sve_load_from_fpsimd_state)
-               sve_load_vq     x1, x2, x3
-               fpsimd_restore  x0, 8
- _for n, 0, 15, _sve_pfalse    \n
-               _sve_wrffr      0
-               ret
+       sve_load_vq     x1, x2, x3
+       fpsimd_restore  x0, 8
+       sve_flush_p_ffr
+       ret
 SYM_FUNC_END(sve_load_from_fpsimd_state)
 
-/* Zero all SVE registers but the first 128-bits of each vector */
+/*
+ * Zero all SVE registers but the first 128-bits of each vector
+ *
+ * VQ must already be configured by caller, any further updates of VQ
+ * will need to ensure that the register state remains valid.
+ *
+ * x0 = VQ - 1
+ */
 SYM_FUNC_START(sve_flush_live)
-       sve_flush
+       cbz             x0, 1f  // A VQ-1 of 0 is 128 bits so no extra Z state
+       sve_flush_z
+1:     sve_flush_p_ffr
        ret
 SYM_FUNC_END(sve_flush_live)
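
The cbz short-circuit rests on the SVE vector length being 128 * VQ bits: when x0 = VQ - 1 is zero the Z registers are exactly 128 bits wide, which the shared FPSIMD V registers already cover, so only the predicates and FFR need flushing. A small worked calculation, illustrative only:

/* Worked arithmetic behind the VQ-1 check above (illustrative only). */
#include <stdio.h>

int main(void)
{
	for (unsigned int vq_minus_1 = 0; vq_minus_1 <= 3; vq_minus_1++) {
		unsigned int vl_bits = 128 * (vq_minus_1 + 1);
		/* The low 128 bits of each Z register alias the FPSIMD V registers. */
		unsigned int extra_z_bits = vl_bits - 128;

		printf("VQ-1=%u: VL=%u bits, Z state beyond V regs=%u bits\n",
		       vq_minus_1, vl_bits, extra_z_bits);
	}
	return 0;
}
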
 
index 3513984..863d44f 100644 (file)
  * Context tracking and irqflag tracing need to instrument transitions between
  * user and kernel mode.
  */
-       .macro user_exit_irqoff
-#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
-       bl      enter_from_user_mode
-#endif
-       .endm
-
        .macro user_enter_irqoff
 #if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
        bl      exit_to_user_mode
        .endr
        .endm
 
-/*
- * Bad Abort numbers
- *-----------------
- */
-#define BAD_SYNC       0
-#define BAD_IRQ                1
-#define BAD_FIQ                2
-#define BAD_ERROR      3
-
-       .macro kernel_ventry, el, label, regsize = 64
+       .macro kernel_ventry, el:req, ht:req, regsize:req, label:req
        .align 7
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
        .if     \el == 0
@@ -87,7 +72,7 @@ alternative_else_nop_endif
        tbnz    x0, #THREAD_SHIFT, 0f
        sub     x0, sp, x0                      // x0'' = sp' - x0' = (sp + x0) - sp = x0
        sub     sp, sp, x0                      // sp'' = sp' - x0 = (sp + x0) - x0 = sp
-       b       el\()\el\()_\label
+       b       el\el\ht\()_\regsize\()_\label
 
 0:
        /*
@@ -119,7 +104,7 @@ alternative_else_nop_endif
        sub     sp, sp, x0
        mrs     x0, tpidrro_el0
 #endif
-       b       el\()\el\()_\label
+       b       el\el\ht\()_\regsize\()_\label
        .endm
 
        .macro tramp_alias, dst, sym
@@ -275,7 +260,7 @@ alternative_else_nop_endif
 
        mte_set_kernel_gcr x22, x23
 
-       scs_load tsk, x20
+       scs_load tsk
        .else
        add     x21, sp, #PT_REGS_SIZE
        get_current_task tsk
@@ -285,7 +270,7 @@ alternative_else_nop_endif
        stp     lr, x21, [sp, #S_LR]
 
        /*
-        * For exceptions from EL0, create a terminal frame record.
+        * For exceptions from EL0, create a final frame record.
         * For exceptions from EL1, create a synthetic frame record so the
         * interrupted code shows up in the backtrace.
         */
@@ -375,7 +360,7 @@ alternative_if ARM64_WORKAROUND_845719
 alternative_else_nop_endif
 #endif
 3:
-       scs_save tsk, x0
+       scs_save tsk
 
 #ifdef CONFIG_ARM64_PTR_AUTH
 alternative_if ARM64_HAS_ADDRESS_AUTH
@@ -486,63 +471,12 @@ SYM_CODE_START_LOCAL(__swpan_exit_el0)
 SYM_CODE_END(__swpan_exit_el0)
 #endif
 
-       .macro  irq_stack_entry
-       mov     x19, sp                 // preserve the original sp
-#ifdef CONFIG_SHADOW_CALL_STACK
-       mov     x24, scs_sp             // preserve the original shadow stack
-#endif
-
-       /*
-        * Compare sp with the base of the task stack.
-        * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
-        * and should switch to the irq stack.
-        */
-       ldr     x25, [tsk, TSK_STACK]
-       eor     x25, x25, x19
-       and     x25, x25, #~(THREAD_SIZE - 1)
-       cbnz    x25, 9998f
-
-       ldr_this_cpu x25, irq_stack_ptr, x26
-       mov     x26, #IRQ_STACK_SIZE
-       add     x26, x25, x26
-
-       /* switch to the irq stack */
-       mov     sp, x26
-
-#ifdef CONFIG_SHADOW_CALL_STACK
-       /* also switch to the irq shadow stack */
-       ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
-#endif
-
-9998:
-       .endm
-
-       /*
-        * The callee-saved regs (x19-x29) should be preserved between
-        * irq_stack_entry and irq_stack_exit, but note that kernel_entry
-        * uses x20-x23 to store data for later use.
-        */
-       .macro  irq_stack_exit
-       mov     sp, x19
-#ifdef CONFIG_SHADOW_CALL_STACK
-       mov     scs_sp, x24
-#endif
-       .endm
-
 /* GPRs used by entry code */
 tsk    .req    x28             // current thread_info
 
 /*
  * Interrupt handling.
  */
-       .macro  irq_handler, handler:req
-       ldr_l   x1, \handler
-       mov     x0, sp
-       irq_stack_entry
-       blr     x1
-       irq_stack_exit
-       .endm
-
        .macro  gic_prio_kentry_setup, tmp:req
 #ifdef CONFIG_ARM64_PSEUDO_NMI
        alternative_if ARM64_HAS_IRQ_PRIO_MASKING
@@ -552,45 +486,6 @@ tsk        .req    x28             // current thread_info
 #endif
        .endm
 
-       .macro el1_interrupt_handler, handler:req
-       enable_da
-
-       mov     x0, sp
-       bl      enter_el1_irq_or_nmi
-
-       irq_handler     \handler
-
-#ifdef CONFIG_PREEMPTION
-       ldr     x24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
-       /*
-        * DA were cleared at start of handling, and IF are cleared by
-        * the GIC irqchip driver using gic_arch_enable_irqs() for
-        * normal IRQs. If anything is set, it means we come back from
-        * an NMI instead of a normal IRQ, so skip preemption
-        */
-       mrs     x0, daif
-       orr     x24, x24, x0
-alternative_else_nop_endif
-       cbnz    x24, 1f                         // preempt count != 0 || NMI return path
-       bl      arm64_preempt_schedule_irq      // irq en/disable is done inside
-1:
-#endif
-
-       mov     x0, sp
-       bl      exit_el1_irq_or_nmi
-       .endm
-
-       .macro el0_interrupt_handler, handler:req
-       user_exit_irqoff
-       enable_da
-
-       tbz     x22, #55, 1f
-       bl      do_el0_irq_bp_hardening
-1:
-       irq_handler     \handler
-       .endm
-
        .text
 
 /*
@@ -600,32 +495,25 @@ alternative_else_nop_endif
 
        .align  11
 SYM_CODE_START(vectors)
-       kernel_ventry   1, sync_invalid                 // Synchronous EL1t
-       kernel_ventry   1, irq_invalid                  // IRQ EL1t
-       kernel_ventry   1, fiq_invalid                  // FIQ EL1t
-       kernel_ventry   1, error_invalid                // Error EL1t
-
-       kernel_ventry   1, sync                         // Synchronous EL1h
-       kernel_ventry   1, irq                          // IRQ EL1h
-       kernel_ventry   1, fiq                          // FIQ EL1h
-       kernel_ventry   1, error                        // Error EL1h
-
-       kernel_ventry   0, sync                         // Synchronous 64-bit EL0
-       kernel_ventry   0, irq                          // IRQ 64-bit EL0
-       kernel_ventry   0, fiq                          // FIQ 64-bit EL0
-       kernel_ventry   0, error                        // Error 64-bit EL0
-
-#ifdef CONFIG_COMPAT
-       kernel_ventry   0, sync_compat, 32              // Synchronous 32-bit EL0
-       kernel_ventry   0, irq_compat, 32               // IRQ 32-bit EL0
-       kernel_ventry   0, fiq_compat, 32               // FIQ 32-bit EL0
-       kernel_ventry   0, error_compat, 32             // Error 32-bit EL0
-#else
-       kernel_ventry   0, sync_invalid, 32             // Synchronous 32-bit EL0
-       kernel_ventry   0, irq_invalid, 32              // IRQ 32-bit EL0
-       kernel_ventry   0, fiq_invalid, 32              // FIQ 32-bit EL0
-       kernel_ventry   0, error_invalid, 32            // Error 32-bit EL0
-#endif
+       kernel_ventry   1, t, 64, sync          // Synchronous EL1t
+       kernel_ventry   1, t, 64, irq           // IRQ EL1t
+       kernel_ventry   1, t, 64, fiq           // FIQ EL1t
+       kernel_ventry   1, t, 64, error         // Error EL1t
+
+       kernel_ventry   1, h, 64, sync          // Synchronous EL1h
+       kernel_ventry   1, h, 64, irq           // IRQ EL1h
+       kernel_ventry   1, h, 64, fiq           // FIQ EL1h
+       kernel_ventry   1, h, 64, error         // Error EL1h
+
+       kernel_ventry   0, t, 64, sync          // Synchronous 64-bit EL0
+       kernel_ventry   0, t, 64, irq           // IRQ 64-bit EL0
+       kernel_ventry   0, t, 64, fiq           // FIQ 64-bit EL0
+       kernel_ventry   0, t, 64, error         // Error 64-bit EL0
+
+       kernel_ventry   0, t, 32, sync          // Synchronous 32-bit EL0
+       kernel_ventry   0, t, 32, irq           // IRQ 32-bit EL0
+       kernel_ventry   0, t, 32, fiq           // FIQ 32-bit EL0
+       kernel_ventry   0, t, 32, error         // Error 32-bit EL0
 SYM_CODE_END(vectors)
 
 #ifdef CONFIG_VMAP_STACK
@@ -656,147 +544,46 @@ __bad_stack:
        ASM_BUG()
 #endif /* CONFIG_VMAP_STACK */
 
-/*
- * Invalid mode handlers
- */
-       .macro  inv_entry, el, reason, regsize = 64
+
+       .macro entry_handler el:req, ht:req, regsize:req, label:req
+SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
        kernel_entry \el, \regsize
        mov     x0, sp
-       mov     x1, #\reason
-       mrs     x2, esr_el1
-       bl      bad_mode
-       ASM_BUG()
+       bl      el\el\ht\()_\regsize\()_\label\()_handler
+       .if \el == 0
+       b       ret_to_user
+       .else
+       b       ret_to_kernel
+       .endif
+SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
        .endm
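
For reference, each stub generated by entry_handler above branches to a C routine named after the same el<el><ht>_<regsize>_<label> pattern with a _handler suffix. A minimal sketch of the expected shape for the EL1h IRQ case (the concrete handlers are defined in the C entry code, outside these hunks, so this prototype is illustrative only):

	/* Sketch only: prototype of the handler the el1h_64_irq stub calls;
	 * the definition (marked noinstr) lives in the C entry code. */
	asmlinkage void el1h_64_irq_handler(struct pt_regs *regs);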
 
-SYM_CODE_START_LOCAL(el0_sync_invalid)
-       inv_entry 0, BAD_SYNC
-SYM_CODE_END(el0_sync_invalid)
-
-SYM_CODE_START_LOCAL(el0_irq_invalid)
-       inv_entry 0, BAD_IRQ
-SYM_CODE_END(el0_irq_invalid)
-
-SYM_CODE_START_LOCAL(el0_fiq_invalid)
-       inv_entry 0, BAD_FIQ
-SYM_CODE_END(el0_fiq_invalid)
-
-SYM_CODE_START_LOCAL(el0_error_invalid)
-       inv_entry 0, BAD_ERROR
-SYM_CODE_END(el0_error_invalid)
-
-SYM_CODE_START_LOCAL(el1_sync_invalid)
-       inv_entry 1, BAD_SYNC
-SYM_CODE_END(el1_sync_invalid)
-
-SYM_CODE_START_LOCAL(el1_irq_invalid)
-       inv_entry 1, BAD_IRQ
-SYM_CODE_END(el1_irq_invalid)
-
-SYM_CODE_START_LOCAL(el1_fiq_invalid)
-       inv_entry 1, BAD_FIQ
-SYM_CODE_END(el1_fiq_invalid)
-
-SYM_CODE_START_LOCAL(el1_error_invalid)
-       inv_entry 1, BAD_ERROR
-SYM_CODE_END(el1_error_invalid)
-
 /*
- * EL1 mode handlers.
+ * Early exception handlers
  */
-       .align  6
-SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
-       kernel_entry 1
-       mov     x0, sp
-       bl      el1_sync_handler
-       kernel_exit 1
-SYM_CODE_END(el1_sync)
-
-       .align  6
-SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
-       kernel_entry 1
-       el1_interrupt_handler handle_arch_irq
-       kernel_exit 1
-SYM_CODE_END(el1_irq)
-
-SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
-       kernel_entry 1
-       el1_interrupt_handler handle_arch_fiq
-       kernel_exit 1
-SYM_CODE_END(el1_fiq)
-
-/*
- * EL0 mode handlers.
- */
-       .align  6
-SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
-       kernel_entry 0
-       mov     x0, sp
-       bl      el0_sync_handler
-       b       ret_to_user
-SYM_CODE_END(el0_sync)
-
-#ifdef CONFIG_COMPAT
-       .align  6
-SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
-       kernel_entry 0, 32
-       mov     x0, sp
-       bl      el0_sync_compat_handler
-       b       ret_to_user
-SYM_CODE_END(el0_sync_compat)
-
-       .align  6
-SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
-       kernel_entry 0, 32
-       b       el0_irq_naked
-SYM_CODE_END(el0_irq_compat)
-
-SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
-       kernel_entry 0, 32
-       b       el0_fiq_naked
-SYM_CODE_END(el0_fiq_compat)
-
-SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
-       kernel_entry 0, 32
-       b       el0_error_naked
-SYM_CODE_END(el0_error_compat)
-#endif
-
-       .align  6
-SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
-       kernel_entry 0
-el0_irq_naked:
-       el0_interrupt_handler handle_arch_irq
-       b       ret_to_user
-SYM_CODE_END(el0_irq)
-
-SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
-       kernel_entry 0
-el0_fiq_naked:
-       el0_interrupt_handler handle_arch_fiq
-       b       ret_to_user
-SYM_CODE_END(el0_fiq)
-
-SYM_CODE_START_LOCAL(el1_error)
-       kernel_entry 1
-       mrs     x1, esr_el1
-       enable_dbg
-       mov     x0, sp
-       bl      do_serror
+       entry_handler   1, t, 64, sync
+       entry_handler   1, t, 64, irq
+       entry_handler   1, t, 64, fiq
+       entry_handler   1, t, 64, error
+
+       entry_handler   1, h, 64, sync
+       entry_handler   1, h, 64, irq
+       entry_handler   1, h, 64, fiq
+       entry_handler   1, h, 64, error
+
+       entry_handler   0, t, 64, sync
+       entry_handler   0, t, 64, irq
+       entry_handler   0, t, 64, fiq
+       entry_handler   0, t, 64, error
+
+       entry_handler   0, t, 32, sync
+       entry_handler   0, t, 32, irq
+       entry_handler   0, t, 32, fiq
+       entry_handler   0, t, 32, error
+
+SYM_CODE_START_LOCAL(ret_to_kernel)
        kernel_exit 1
-SYM_CODE_END(el1_error)
-
-SYM_CODE_START_LOCAL(el0_error)
-       kernel_entry 0
-el0_error_naked:
-       mrs     x25, esr_el1
-       user_exit_irqoff
-       enable_dbg
-       mov     x0, sp
-       mov     x1, x25
-       bl      do_serror
-       enable_da
-       b       ret_to_user
-SYM_CODE_END(el0_error)
+SYM_CODE_END(ret_to_kernel)
 
 /*
  * "slow" syscall return path.
@@ -979,8 +766,8 @@ SYM_FUNC_START(cpu_switch_to)
        mov     sp, x9
        msr     sp_el0, x1
        ptrauth_keys_install_kernel x1, x8, x9, x10
-       scs_save x0, x8
-       scs_load x1, x8
+       scs_save x0
+       scs_load x1
        ret
 SYM_FUNC_END(cpu_switch_to)
 NOKPROBE(cpu_switch_to)
@@ -998,6 +785,42 @@ SYM_CODE_START(ret_from_fork)
 SYM_CODE_END(ret_from_fork)
 NOKPROBE(ret_from_fork)
 
+/*
+ * void call_on_irq_stack(struct pt_regs *regs,
+ *                       void (*func)(struct pt_regs *));
+ *
+ * Calls func(regs) using this CPU's irq stack and shadow irq stack.
+ */
+SYM_FUNC_START(call_on_irq_stack)
+#ifdef CONFIG_SHADOW_CALL_STACK
+       stp     scs_sp, xzr, [sp, #-16]!
+       ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
+#endif
+       /* Create a frame record to save our LR and SP (implicit in FP) */
+       stp     x29, x30, [sp, #-16]!
+       mov     x29, sp
+
+       ldr_this_cpu x16, irq_stack_ptr, x17
+       mov     x15, #IRQ_STACK_SIZE
+       add     x16, x16, x15
+
+       /* Move to the new stack and call the function there */
+       mov     sp, x16
+       blr     x1
+
+       /*
+        * Restore the SP from the FP, and restore the FP and LR from the frame
+        * record.
+        */
+       mov     sp, x29
+       ldp     x29, x30, [sp], #16
+#ifdef CONFIG_SHADOW_CALL_STACK
+       ldp     scs_sp, xzr, [sp], #16
+#endif
+       ret
+SYM_FUNC_END(call_on_irq_stack)
+NOKPROBE(call_on_irq_stack)
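
A plausible C-side caller of call_on_irq_stack(), shown only as a sketch; the helper name do_interrupt_handler() and the on_thread_stack() check are assumptions about the C entry code, which is outside these hunks:

	/* Sketch: run an interrupt handler on the per-CPU IRQ stack when we
	 * are currently on the task stack, otherwise call it directly. */
	static void do_interrupt_handler(struct pt_regs *regs,
					 void (*handler)(struct pt_regs *))
	{
		if (on_thread_stack())
			call_on_irq_stack(regs, handler);
		else
			handler(regs);
	}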
+
 #ifdef CONFIG_ARM_SDE_INTERFACE
 
 #include <asm/sdei.h>
index ad3dd34..e57b23f 100644 (file)
@@ -957,8 +957,10 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs)
         * disabling the trap, otherwise update our in-memory copy.
         */
        if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
-               sve_set_vq(sve_vq_from_vl(current->thread.sve_vl) - 1);
-               sve_flush_live();
+               unsigned long vq_minus_one =
+                       sve_vq_from_vl(current->thread.sve_vl) - 1;
+               sve_set_vq(vq_minus_one);
+               sve_flush_live(vq_minus_one);
                fpsimd_bind_task_to_cpu();
        } else {
                fpsimd_to_sve(current);
index b5d3dda..7f467bd 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/debug-monitors.h>
 #include <asm/ftrace.h>
 #include <asm/insn.h>
+#include <asm/patching.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 /*
index 96873df..c5c994a 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/asm_pointer_auth.h>
 #include <asm/assembler.h>
 #include <asm/boot.h>
+#include <asm/bug.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
@@ -117,8 +118,8 @@ SYM_CODE_START_LOCAL(preserve_boot_args)
        dmb     sy                              // needed before dc ivac with
                                                // MMU off
 
-       mov     x1, #0x20                       // 4 x 8 bytes
-       b       __inval_dcache_area             // tail call
+       add     x1, x0, #0x20                   // 4 x 8 bytes
+       b       dcache_inval_poc                // tail call
 SYM_CODE_END(preserve_boot_args)
 
 /*
@@ -195,7 +196,7 @@ SYM_CODE_END(preserve_boot_args)
        and     \iend, \iend, \istart   // iend = (vend >> shift) & (ptrs - 1)
        mov     \istart, \ptrs
        mul     \istart, \istart, \count
-       add     \iend, \iend, \istart   // iend += (count - 1) * ptrs
+       add     \iend, \iend, \istart   // iend += count * ptrs
                                        // our entries span multiple tables
 
        lsr     \istart, \vstart, \shift
@@ -268,8 +269,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
         */
        adrp    x0, init_pg_dir
        adrp    x1, init_pg_end
-       sub     x1, x1, x0
-       bl      __inval_dcache_area
+       bl      dcache_inval_poc
 
        /*
         * Clear the init page tables.
@@ -354,7 +354,6 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 #endif
 1:
        ldr_l   x4, idmap_ptrs_per_pgd
-       mov     x5, x3                          // __pa(__idmap_text_start)
        adr_l   x6, __idmap_text_end            // __pa(__idmap_text_end)
 
        map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
@@ -382,39 +381,57 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 
        adrp    x0, idmap_pg_dir
        adrp    x1, idmap_pg_end
-       sub     x1, x1, x0
-       bl      __inval_dcache_area
+       bl      dcache_inval_poc
 
        adrp    x0, init_pg_dir
        adrp    x1, init_pg_end
-       sub     x1, x1, x0
-       bl      __inval_dcache_area
+       bl      dcache_inval_poc
 
        ret     x28
 SYM_FUNC_END(__create_page_tables)
 
+       /*
+        * Initialize CPU registers with task-specific and cpu-specific context.
+        *
+        * Create a final frame record at task_pt_regs(current)->stackframe, so
+        * that the unwinder can identify the final frame record of any task by
+        * its location in the task stack. We reserve the entire pt_regs space
+        * for consistency with user tasks and kthreads.
+        */
+       .macro  init_cpu_task tsk, tmp1, tmp2
+       msr     sp_el0, \tsk
+
+       ldr     \tmp1, [\tsk, #TSK_STACK]
+       add     sp, \tmp1, #THREAD_SIZE
+       sub     sp, sp, #PT_REGS_SIZE
+
+       stp     xzr, xzr, [sp, #S_STACKFRAME]
+       add     x29, sp, #S_STACKFRAME
+
+       scs_load \tsk
+
+       adr_l   \tmp1, __per_cpu_offset
+       ldr     w\tmp2, [\tsk, #TSK_CPU]
+       ldr     \tmp1, [\tmp1, \tmp2, lsl #3]
+       set_this_cpu_offset \tmp1
+       .endm
+
 /*
  * The following fragment of code is executed with the MMU enabled.
  *
  *   x0 = __PHYS_OFFSET
  */
 SYM_FUNC_START_LOCAL(__primary_switched)
-       adrp    x4, init_thread_union
-       add     sp, x4, #THREAD_SIZE
-       adr_l   x5, init_task
-       msr     sp_el0, x5                      // Save thread_info
+       adr_l   x4, init_task
+       init_cpu_task x4, x5, x6
 
        adr_l   x8, vectors                     // load VBAR_EL1 with virtual
        msr     vbar_el1, x8                    // vector table address
        isb
 
-       stp     xzr, x30, [sp, #-16]!
+       stp     x29, x30, [sp, #-16]!
        mov     x29, sp
 
-#ifdef CONFIG_SHADOW_CALL_STACK
-       adr_l   scs_sp, init_shadow_call_stack  // Set shadow call stack
-#endif
-
        str_l   x21, __fdt_pointer, x5          // Save FDT pointer
 
        ldr_l   x4, kimage_vaddr                // Save the offset between
@@ -446,10 +463,9 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 0:
 #endif
        bl      switch_to_vhe                   // Prefer VHE if possible
-       add     sp, sp, #16
-       mov     x29, #0
-       mov     x30, #0
-       b       start_kernel
+       ldp     x29, x30, [sp], #16
+       bl      start_kernel
+       ASM_BUG()
 SYM_FUNC_END(__primary_switched)
 
        .pushsection ".rodata", "a"
@@ -551,7 +567,7 @@ SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
        cmp     w0, #BOOT_CPU_MODE_EL2
        b.ne    1f
        add     x1, x1, #4
-1:     str     w0, [x1]                        // This CPU has booted in EL1
+1:     str     w0, [x1]                        // Save CPU boot mode
        dmb     sy
        dc      ivac, x1                        // Invalidate potentially stale cache line
        ret
@@ -632,21 +648,17 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
        isb
 
        adr_l   x0, secondary_data
-       ldr     x1, [x0, #CPU_BOOT_STACK]       // get secondary_data.stack
-       cbz     x1, __secondary_too_slow
-       mov     sp, x1
        ldr     x2, [x0, #CPU_BOOT_TASK]
        cbz     x2, __secondary_too_slow
-       msr     sp_el0, x2
-       scs_load x2, x3
-       mov     x29, #0
-       mov     x30, #0
+
+       init_cpu_task x2, x1, x3
 
 #ifdef CONFIG_ARM64_PTR_AUTH
        ptrauth_keys_init_cpu x2, x3, x4, x5
 #endif
 
-       b       secondary_start_kernel
+       bl      secondary_start_kernel
+       ASM_BUG()
 SYM_FUNC_END(__secondary_switched)
 
 SYM_FUNC_START_LOCAL(__secondary_too_slow)
index 8ccca66..81c0186 100644 (file)
@@ -45,7 +45,7 @@
  * Because this code has to be copied to a 'safe' page, it can't call out to
  * other functions by PC-relative address. Also remember that it may be
  * mid-way through over-writing other functions. For this reason it contains
- * code from flush_icache_range() and uses the copy_page() macro.
+ * code from caches_clean_inval_pou() and uses the copy_page() macro.
  *
  * This 'safe' page is mapped via ttbr0, and executed from there. This function
  * switches to a copy of the linear map in ttbr1, performs the restore, then
@@ -87,11 +87,12 @@ SYM_CODE_START(swsusp_arch_suspend_exit)
        copy_page       x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 
        add     x1, x10, #PAGE_SIZE
-       /* Clean the copied page to PoU - based on flush_icache_range() */
+       /* Clean the copied page to PoU - based on caches_clean_inval_pou() */
        raw_dcache_line_size x2, x3
        sub     x3, x2, #1
        bic     x4, x10, x3
-2:     dc      cvau, x4        /* clean D line / unified line */
+2:     /* clean D line / unified line */
+alternative_insn "dc cvau, x4",  "dc civac, x4",  ARM64_WORKAROUND_CLEAN_CACHE
        add     x4, x4, x2
        cmp     x4, x1
        b.lo    2b
index b1cef37..46a0b4d 100644 (file)
@@ -210,7 +210,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
                return -ENOMEM;
 
        memcpy(page, src_start, length);
-       __flush_icache_range((unsigned long)page, (unsigned long)page + length);
+       caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);
        rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
        if (rc)
                return rc;
@@ -240,8 +240,6 @@ static int create_safe_exec_page(void *src_start, size_t length,
        return 0;
 }
 
-#define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start))
-
 #ifdef CONFIG_ARM64_MTE
 
 static DEFINE_XARRAY(mte_pages);
@@ -383,13 +381,18 @@ int swsusp_arch_suspend(void)
                ret = swsusp_save();
        } else {
                /* Clean kernel core startup/idle code to PoC*/
-               dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
-               dcache_clean_range(__idmap_text_start, __idmap_text_end);
+               dcache_clean_inval_poc((unsigned long)__mmuoff_data_start,
+                                   (unsigned long)__mmuoff_data_end);
+               dcache_clean_inval_poc((unsigned long)__idmap_text_start,
+                                   (unsigned long)__idmap_text_end);
 
                /* Clean kvm setup code to PoC? */
                if (el2_reset_needed()) {
-                       dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
-                       dcache_clean_range(__hyp_text_start, __hyp_text_end);
+                       dcache_clean_inval_poc(
+                               (unsigned long)__hyp_idmap_text_start,
+                               (unsigned long)__hyp_idmap_text_end);
+                       dcache_clean_inval_poc((unsigned long)__hyp_text_start,
+                                           (unsigned long)__hyp_text_end);
                }
 
                swsusp_mte_restore_tags();
@@ -474,7 +477,8 @@ int swsusp_arch_resume(void)
         * The hibernate exit text contains a set of el2 vectors, that will
         * be executed at el2 with the mmu off in order to reload hyp-stub.
         */
-       __flush_dcache_area(hibernate_exit, exit_size);
+       dcache_clean_inval_poc((unsigned long)hibernate_exit,
+                           (unsigned long)hibernate_exit + exit_size);
 
        /*
         * KASLR will cause the el2 vectors to be in a different location in
diff --git a/arch/arm64/kernel/idle.c b/arch/arm64/kernel/idle.c
new file mode 100644 (file)
index 0000000..a2cfbac
--- /dev/null
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Low-level idle sequences
+ */
+
+#include <linux/cpu.h>
+#include <linux/irqflags.h>
+
+#include <asm/barrier.h>
+#include <asm/cpuidle.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
+
+/*
+ *     cpu_do_idle()
+ *
+ *     Idle the processor (wait for interrupt).
+ *
+ *     If the CPU supports priority masking we must do additional work to
+ *     ensure that interrupts are not masked at the PMR (because the core will
+ *     not wake up if we block the wake up signal in the interrupt controller).
+ */
+void noinstr cpu_do_idle(void)
+{
+       struct arm_cpuidle_irq_context context;
+
+       arm_cpuidle_save_irq_context(&context);
+
+       dsb(sy);
+       wfi();
+
+       arm_cpuidle_restore_irq_context(&context);
+}
+
+/*
+ * This is our default idle handler.
+ */
+void noinstr arch_cpu_idle(void)
+{
+       /*
+        * This should do all the clock switching and wait for interrupt
+        * tricks
+        */
+       cpu_do_idle();
+       raw_local_irq_enable();
+}
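
The arm_cpuidle_save_irq_context()/arm_cpuidle_restore_irq_context() helpers come from <asm/cpuidle.h> and are not shown in this diff. A rough sketch of what the save half is expected to do when pseudo-NMIs (PMR masking) are in use, based on the __cpu_do_idle_irqprio() code removed from process.c further down; the struct layout here is an assumption:

	struct arm_cpuidle_irq_context {
		unsigned long pmr;
		unsigned long daif_bits;
	};

	static inline void arm_cpuidle_save_irq_context(struct arm_cpuidle_irq_context *c)
	{
		if (!system_uses_irq_prio_masking())
			return;
		/* Mask IRQ/FIQ at the CPU, then open up the PMR so the
		 * wake-up interrupt can still reach the core during WFI. */
		c->daif_bits = read_sysreg(daif);
		write_sysreg(c->daif_bits | PSR_I_BIT | PSR_F_BIT, daif);
		c->pmr = gic_read_pmr();
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
	}

The restore helper reverses this: gic_write_pmr(c->pmr) followed by write_sysreg(c->daif_bits, daif).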
index e628c8c..53a381a 100644 (file)
@@ -237,7 +237,8 @@ asmlinkage void __init init_feature_override(void)
 
        for (i = 0; i < ARRAY_SIZE(regs); i++) {
                if (regs[i]->override)
-                       __flush_dcache_area(regs[i]->override,
+                       dcache_clean_inval_poc((unsigned long)regs[i]->override,
+                                           (unsigned long)regs[i]->override +
                                            sizeof(*regs[i]->override));
        }
 }
index bcf3c27..c96a9a0 100644 (file)
@@ -35,7 +35,7 @@ __efistub_strnlen             = __pi_strnlen;
 __efistub_strcmp               = __pi_strcmp;
 __efistub_strncmp              = __pi_strncmp;
 __efistub_strrchr              = __pi_strrchr;
-__efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
+__efistub_dcache_clean_poc = __pi_dcache_clean_poc;
 
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 __efistub___memcpy             = __pi_memcpy;
index 9a8a0ae..fc98037 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/jump_label.h>
 #include <asm/insn.h>
+#include <asm/patching.h>
 
 void arch_jump_label_transform(struct jump_entry *entry,
                               enum jump_label_type type)
index 341342b..cfa2cfd 100644 (file)
@@ -72,7 +72,9 @@ u64 __init kaslr_early_init(void)
         * we end up running with module randomization disabled.
         */
        module_alloc_base = (u64)_etext - MODULES_VSIZE;
-       __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+       dcache_clean_inval_poc((unsigned long)&module_alloc_base,
+                           (unsigned long)&module_alloc_base +
+                                   sizeof(module_alloc_base));
 
        /*
         * Try to map the FDT early. If this fails, we simply bail,
@@ -170,8 +172,12 @@ u64 __init kaslr_early_init(void)
        module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
        module_alloc_base &= PAGE_MASK;
 
-       __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
-       __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+       dcache_clean_inval_poc((unsigned long)&module_alloc_base,
+                           (unsigned long)&module_alloc_base +
+                                   sizeof(module_alloc_base));
+       dcache_clean_inval_poc((unsigned long)&memstart_offset_seed,
+                           (unsigned long)&memstart_offset_seed +
+                                   sizeof(memstart_offset_seed));
 
        return offset;
 }
index 1a157ca..2aede78 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <asm/debug-monitors.h>
 #include <asm/insn.h>
+#include <asm/patching.h>
 #include <asm/traps.h>
 
 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
index 90a335c..03ceabe 100644 (file)
@@ -68,10 +68,16 @@ int machine_kexec_post_load(struct kimage *kimage)
        kimage->arch.kern_reloc = __pa(reloc_code);
        kexec_image_info(kimage);
 
-       /* Flush the reloc_code in preparation for its execution. */
-       __flush_dcache_area(reloc_code, arm64_relocate_new_kernel_size);
-       flush_icache_range((uintptr_t)reloc_code, (uintptr_t)reloc_code +
-                          arm64_relocate_new_kernel_size);
+       /*
+        * For execution with the MMU off, reloc_code needs to be cleaned to the
+        * PoC and invalidated from the I-cache.
+        */
+       dcache_clean_inval_poc((unsigned long)reloc_code,
+                           (unsigned long)reloc_code +
+                                   arm64_relocate_new_kernel_size);
+       icache_inval_pou((uintptr_t)reloc_code,
+                               (uintptr_t)reloc_code +
+                                       arm64_relocate_new_kernel_size);
 
        return 0;
 }
@@ -102,16 +108,18 @@ static void kexec_list_flush(struct kimage *kimage)
 
        for (entry = &kimage->head; ; entry++) {
                unsigned int flag;
-               void *addr;
+               unsigned long addr;
 
                /* flush the list entries. */
-               __flush_dcache_area(entry, sizeof(kimage_entry_t));
+               dcache_clean_inval_poc((unsigned long)entry,
+                                   (unsigned long)entry +
+                                           sizeof(kimage_entry_t));
 
                flag = *entry & IND_FLAGS;
                if (flag == IND_DONE)
                        break;
 
-               addr = phys_to_virt(*entry & PAGE_MASK);
+               addr = (unsigned long)phys_to_virt(*entry & PAGE_MASK);
 
                switch (flag) {
                case IND_INDIRECTION:
@@ -120,7 +128,7 @@ static void kexec_list_flush(struct kimage *kimage)
                        break;
                case IND_SOURCE:
                        /* flush the source pages. */
-                       __flush_dcache_area(addr, PAGE_SIZE);
+                       dcache_clean_inval_poc(addr, addr + PAGE_SIZE);
                        break;
                case IND_DESTINATION:
                        break;
@@ -147,8 +155,10 @@ static void kexec_segment_flush(const struct kimage *kimage)
                        kimage->segment[i].memsz,
                        kimage->segment[i].memsz /  PAGE_SIZE);
 
-               __flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
-                       kimage->segment[i].memsz);
+               dcache_clean_inval_poc(
+                       (unsigned long)phys_to_virt(kimage->segment[i].mem),
+                       (unsigned long)phys_to_virt(kimage->segment[i].mem) +
+                               kimage->segment[i].memsz);
        }
 }
 
diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c
new file mode 100644 (file)
index 0000000..771f543
--- /dev/null
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/insn.h>
+#include <asm/kprobes.h>
+#include <asm/patching.h>
+#include <asm/sections.h>
+
+static DEFINE_RAW_SPINLOCK(patch_lock);
+
+static bool is_exit_text(unsigned long addr)
+{
+       /* discarded with init text/data */
+       return system_state < SYSTEM_RUNNING &&
+               addr >= (unsigned long)__exittext_begin &&
+               addr < (unsigned long)__exittext_end;
+}
+
+static bool is_image_text(unsigned long addr)
+{
+       return core_kernel_text(addr) || is_exit_text(addr);
+}
+
+static void __kprobes *patch_map(void *addr, int fixmap)
+{
+       unsigned long uintaddr = (uintptr_t) addr;
+       bool image = is_image_text(uintaddr);
+       struct page *page;
+
+       if (image)
+               page = phys_to_page(__pa_symbol(addr));
+       else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+               page = vmalloc_to_page(addr);
+       else
+               return addr;
+
+       BUG_ON(!page);
+       return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
+                       (uintaddr & ~PAGE_MASK));
+}
+
+static void __kprobes patch_unmap(int fixmap)
+{
+       clear_fixmap(fixmap);
+}
+/*
+ * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
+ * little-endian.
+ */
+int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
+{
+       int ret;
+       __le32 val;
+
+       ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
+       if (!ret)
+               *insnp = le32_to_cpu(val);
+
+       return ret;
+}
+
+static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
+{
+       void *waddr = addr;
+       unsigned long flags = 0;
+       int ret;
+
+       raw_spin_lock_irqsave(&patch_lock, flags);
+       waddr = patch_map(addr, FIX_TEXT_POKE0);
+
+       ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
+
+       patch_unmap(FIX_TEXT_POKE0);
+       raw_spin_unlock_irqrestore(&patch_lock, flags);
+
+       return ret;
+}
+
+int __kprobes aarch64_insn_write(void *addr, u32 insn)
+{
+       return __aarch64_insn_write(addr, cpu_to_le32(insn));
+}
+
+int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
+{
+       u32 *tp = addr;
+       int ret;
+
+       /* A64 instructions must be word aligned */
+       if ((uintptr_t)tp & 0x3)
+               return -EINVAL;
+
+       ret = aarch64_insn_write(tp, insn);
+       if (ret == 0)
+               caches_clean_inval_pou((uintptr_t)tp,
+                                    (uintptr_t)tp + AARCH64_INSN_SIZE);
+
+       return ret;
+}
+
+struct aarch64_insn_patch {
+       void            **text_addrs;
+       u32             *new_insns;
+       int             insn_cnt;
+       atomic_t        cpu_count;
+};
+
+static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+{
+       int i, ret = 0;
+       struct aarch64_insn_patch *pp = arg;
+
+       /* The first CPU becomes master */
+       if (atomic_inc_return(&pp->cpu_count) == 1) {
+               for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
+                       ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
+                                                            pp->new_insns[i]);
+               /* Notify other processors with an additional increment. */
+               atomic_inc(&pp->cpu_count);
+       } else {
+               while (atomic_read(&pp->cpu_count) <= num_online_cpus())
+                       cpu_relax();
+               isb();
+       }
+
+       return ret;
+}
+
+int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
+{
+       struct aarch64_insn_patch patch = {
+               .text_addrs = addrs,
+               .new_insns = insns,
+               .insn_cnt = cnt,
+               .cpu_count = ATOMIC_INIT(0),
+       };
+
+       if (cnt <= 0)
+               return -EINVAL;
+
+       return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
+                                      cpu_online_mask);
+}
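
A minimal usage sketch for the text-patching interface collected here; the patch site address is hypothetical, and aarch64_insn_gen_nop() is assumed to be available from the insn code:

	/* Sketch: atomically replace one kernel instruction with a NOP,
	 * synchronising all CPUs via stop_machine(). */
	void *addrs[] = { (void *)patch_site };		/* hypothetical address */
	u32 insns[] = { aarch64_insn_gen_nop() };
	int err = aarch64_insn_patch_text(addrs, insns, 1);

For a single instruction that does not need cross-CPU synchronisation, aarch64_insn_patch_text_nosync() can be called directly; it performs the write followed by the caches_clean_inval_pou() maintenance shown above.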
index 88ff471..4a72c27 100644 (file)
@@ -116,7 +116,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                tail = (struct frame_tail __user *)regs->regs[29];
 
                while (entry->nr < entry->max_stack &&
-                      tail && !((unsigned long)tail & 0xf))
+                      tail && !((unsigned long)tail & 0x7))
                        tail = user_backtrace(tail, entry);
        } else {
 #ifdef CONFIG_COMPAT
index f594957..d07788d 100644 (file)
@@ -165,10 +165,7 @@ armv8pmu_events_sysfs_show(struct device *dev,
 }
 
 #define ARMV8_EVENT_ATTR(name, config)                                         \
-       (&((struct perf_pmu_events_attr) {                                      \
-               .attr = __ATTR(name, 0444, armv8pmu_events_sysfs_show, NULL),   \
-               .id = config,                                                   \
-       }).attr.attr)
+       PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config)
 
 static struct attribute *armv8_pmuv3_event_attrs[] = {
        ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
@@ -312,13 +309,46 @@ static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
        struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
        u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;
 
-       return snprintf(page, PAGE_SIZE, "0x%08x\n", slots);
+       return sysfs_emit(page, "0x%08x\n", slots);
 }
 
 static DEVICE_ATTR_RO(slots);
 
+static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr,
+                             char *page)
+{
+       struct pmu *pmu = dev_get_drvdata(dev);
+       struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+       u32 bus_slots = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_SLOTS_SHIFT)
+                       & ARMV8_PMU_BUS_SLOTS_MASK;
+
+       return sysfs_emit(page, "0x%08x\n", bus_slots);
+}
+
+static DEVICE_ATTR_RO(bus_slots);
+
+static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr,
+                             char *page)
+{
+       struct pmu *pmu = dev_get_drvdata(dev);
+       struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+       u32 bus_width = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_WIDTH_SHIFT)
+                       & ARMV8_PMU_BUS_WIDTH_MASK;
+       u32 val = 0;
+
+       /* Encoded as Log2(number of bytes), plus one */
+       if (bus_width > 2 && bus_width < 13)
+               val = 1 << (bus_width - 1);
+
+       return sysfs_emit(page, "0x%08x\n", val);
+}
+
+static DEVICE_ATTR_RO(bus_width);
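
A worked example of the encoding handled above, with an illustrative register value: a PMMIR.BUS_WIDTH field of 3 means Log2(bytes) + 1 == 3, i.e. a 4-byte bus:

	u32 bus_width = 3;	/* illustrative PMMIR.BUS_WIDTH field value */
	u32 bytes = (bus_width > 2 && bus_width < 13) ? 1 << (bus_width - 1) : 0;
	/* bytes == 4, so the sysfs file reports 0x00000004;
	 * out-of-range field values read back as 0. */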
+
 static struct attribute *armv8_pmuv3_caps_attrs[] = {
        &dev_attr_slots.attr,
+       &dev_attr_bus_slots.attr,
+       &dev_attr_bus_width.attr,
        NULL,
 };
 
index 004b86e..6dbcc89 100644 (file)
@@ -7,26 +7,28 @@
  * Copyright (C) 2013 Linaro Limited.
  * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
  */
+#include <linux/extable.h>
 #include <linux/kasan.h>
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
-#include <linux/extable.h>
-#include <linux/slab.h>
-#include <linux/stop_machine.h>
 #include <linux/sched/debug.h>
 #include <linux/set_memory.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
 #include <linux/stringify.h>
+#include <linux/uaccess.h>
 #include <linux/vmalloc.h>
-#include <asm/traps.h>
-#include <asm/ptrace.h>
+
 #include <asm/cacheflush.h>
-#include <asm/debug-monitors.h>
 #include <asm/daifflags.h>
-#include <asm/system_misc.h>
+#include <asm/debug-monitors.h>
 #include <asm/insn.h>
-#include <linux/uaccess.h>
 #include <asm/irq.h>
+#include <asm/patching.h>
+#include <asm/ptrace.h>
 #include <asm/sections.h>
+#include <asm/system_misc.h>
+#include <asm/traps.h>
 
 #include "decode-insn.h"
 
index 25f67ec..22d0b32 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/kprobes.h>
 
 #include <asm/ptrace.h>
+#include <asm/traps.h>
 
 #include "simulate-insn.h"
 
index 2c24763..9be668f 100644 (file)
@@ -21,7 +21,7 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
        memcpy(dst, src, len);
 
        /* flush caches (dcache/icache) */
-       sync_icache_aliases(dst, len);
+       sync_icache_aliases((unsigned long)dst, (unsigned long)dst + len);
 
        kunmap_atomic(xol_page_kaddr);
 }
index 14f3c19..5ba0ed0 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
 #include <linux/kernel.h>
-#include <linux/lockdep.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/nospec.h>
@@ -46,7 +45,6 @@
 #include <linux/prctl.h>
 
 #include <asm/alternative.h>
-#include <asm/arch_gicv3.h>
 #include <asm/compat.h>
 #include <asm/cpufeature.h>
 #include <asm/cacheflush.h>
@@ -74,63 +72,6 @@ EXPORT_SYMBOL_GPL(pm_power_off);
 
 void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 
-static void noinstr __cpu_do_idle(void)
-{
-       dsb(sy);
-       wfi();
-}
-
-static void noinstr __cpu_do_idle_irqprio(void)
-{
-       unsigned long pmr;
-       unsigned long daif_bits;
-
-       daif_bits = read_sysreg(daif);
-       write_sysreg(daif_bits | PSR_I_BIT | PSR_F_BIT, daif);
-
-       /*
-        * Unmask PMR before going idle to make sure interrupts can
-        * be raised.
-        */
-       pmr = gic_read_pmr();
-       gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
-
-       __cpu_do_idle();
-
-       gic_write_pmr(pmr);
-       write_sysreg(daif_bits, daif);
-}
-
-/*
- *     cpu_do_idle()
- *
- *     Idle the processor (wait for interrupt).
- *
- *     If the CPU supports priority masking we must do additional work to
- *     ensure that interrupts are not masked at the PMR (because the core will
- *     not wake up if we block the wake up signal in the interrupt controller).
- */
-void noinstr cpu_do_idle(void)
-{
-       if (system_uses_irq_prio_masking())
-               __cpu_do_idle_irqprio();
-       else
-               __cpu_do_idle();
-}
-
-/*
- * This is our default idle handler.
- */
-void noinstr arch_cpu_idle(void)
-{
-       /*
-        * This should do all the clock switching and wait for interrupt
-        * tricks
-        */
-       cpu_do_idle();
-       raw_local_irq_enable();
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 void arch_cpu_idle_dead(void)
 {
@@ -435,6 +376,11 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
        }
        p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
        p->thread.cpu_context.sp = (unsigned long)childregs;
+       /*
+        * For the benefit of the unwinder, set up childregs->stackframe
+        * as the final frame for the new task.
+        */
+       p->thread.cpu_context.fp = (unsigned long)childregs->stackframe;
 
        ptrace_hw_copy_thread(p);
 
@@ -527,6 +473,15 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
        write_sysreg(val, cntkctl_el1);
 }
 
+static void compat_thread_switch(struct task_struct *next)
+{
+       if (!is_compat_thread(task_thread_info(next)))
+               return;
+
+       if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+               set_tsk_thread_flag(next, TIF_NOTIFY_RESUME);
+}
+
 static void update_sctlr_el1(u64 sctlr)
 {
        /*
@@ -568,6 +523,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
        ssbs_thread_switch(next);
        erratum_1418040_thread_switch(prev, next);
        ptrauth_thread_switch_user(next);
+       compat_thread_switch(next);
 
        /*
         * Complete any pending TLB or cache maintenance on this CPU in case
@@ -633,8 +589,15 @@ unsigned long arch_align_stack(unsigned long sp)
  */
 void arch_setup_new_exec(void)
 {
-       current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
+       unsigned long mmflags = 0;
+
+       if (is_compat_task()) {
+               mmflags = MMCF_AARCH32;
+               if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+                       set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+       }
 
+       current->mm->context.flags = mmflags;
        ptrauth_thread_init_user();
        mte_thread_init_user();
 
@@ -724,22 +687,6 @@ static int __init tagged_addr_init(void)
 core_initcall(tagged_addr_init);
 #endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
 
-asmlinkage void __sched arm64_preempt_schedule_irq(void)
-{
-       lockdep_assert_irqs_disabled();
-
-       /*
-        * Preempting a task from an IRQ means we leave copies of PSTATE
-        * on the stack. cpufeature's enable calls may modify PSTATE, but
-        * resuming one of these preempted tasks would undo those changes.
-        *
-        * Only allow a task to be preempted once cpufeatures have been
-        * enabled.
-        */
-       if (system_capabilities_finalized())
-               preempt_schedule_irq();
-}
-
 #ifdef CONFIG_BINFMT_ELF
 int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
                         bool has_interp, bool is_interp)
index eb2f739..499b6b2 100644 (file)
@@ -122,7 +122,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
 {
        return ((addr & ~(THREAD_SIZE - 1))  ==
                (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
-               on_irq_stack(addr, NULL);
+               on_irq_stack(addr, sizeof(unsigned long), NULL);
 }
 
 /**
index 2c7ca44..47f77d1 100644 (file)
@@ -162,31 +162,33 @@ static int init_sdei_scs(void)
        return err;
 }
 
-static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
+static bool on_sdei_normal_stack(unsigned long sp, unsigned long size,
+                                struct stack_info *info)
 {
        unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
        unsigned long high = low + SDEI_STACK_SIZE;
 
-       return on_stack(sp, low, high, STACK_TYPE_SDEI_NORMAL, info);
+       return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info);
 }
 
-static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
+static bool on_sdei_critical_stack(unsigned long sp, unsigned long size,
+                                  struct stack_info *info)
 {
        unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
        unsigned long high = low + SDEI_STACK_SIZE;
 
-       return on_stack(sp, low, high, STACK_TYPE_SDEI_CRITICAL, info);
+       return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info);
 }
 
-bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
+bool _on_sdei_stack(unsigned long sp, unsigned long size, struct stack_info *info)
 {
        if (!IS_ENABLED(CONFIG_VMAP_STACK))
                return false;
 
-       if (on_sdei_critical_stack(sp, info))
+       if (on_sdei_critical_stack(sp, size, info))
                return true;
 
-       if (on_sdei_normal_stack(sp, info))
+       if (on_sdei_normal_stack(sp, size, info))
                return true;
 
        return false;
@@ -231,13 +233,13 @@ out_err:
 }
 
 /*
- * __sdei_handler() returns one of:
+ * do_sdei_event() returns one of:
  *  SDEI_EV_HANDLED -  success, return to the interrupted context.
  *  SDEI_EV_FAILED  -  failure, return this error code to firmware.
  *  virtual-address -  success, return to this address.
  */
-static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
-                                            struct sdei_registered_event *arg)
+unsigned long __kprobes do_sdei_event(struct pt_regs *regs,
+                                     struct sdei_registered_event *arg)
 {
        u32 mode;
        int i, err = 0;
@@ -292,45 +294,3 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
 
        return vbar + 0x480;
 }
-
-static void __kprobes notrace __sdei_pstate_entry(void)
-{
-       /*
-        * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
-        * whether PSTATE bits are inherited unchanged or generated from
-        * scratch, and the TF-A implementation always clears PAN and always
-        * clears UAO. There are no other known implementations.
-        *
-        * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
-        * PSTATE is modified upon architectural exceptions, and so PAN is
-        * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
-        * cleared.
-        *
-        * We must explicitly reset PAN to the expected state, including
-        * clearing it when the host isn't using it, in case a VM had it set.
-        */
-       if (system_uses_hw_pan())
-               set_pstate_pan(1);
-       else if (cpu_has_pan())
-               set_pstate_pan(0);
-}
-
-asmlinkage noinstr unsigned long
-__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
-{
-       unsigned long ret;
-
-       /*
-        * We didn't take an exception to get here, so the HW hasn't
-        * set/cleared bits in PSTATE that we may rely on. Initialize PAN.
-        */
-       __sdei_pstate_entry();
-
-       arm64_enter_nmi(regs);
-
-       ret = _sdei_handler(regs, arg);
-
-       arm64_exit_nmi(regs);
-
-       return ret;
-}
index 61845c0..8ed6614 100644 (file)
@@ -87,12 +87,6 @@ void __init smp_setup_processor_id(void)
        u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
        set_cpu_logical_map(0, mpidr);
 
-       /*
-        * clear __my_cpu_offset on boot CPU to avoid hang caused by
-        * using percpu variable early, for example, lockdep will
-        * access percpu variable inside lock_release
-        */
-       set_my_cpu_offset(0);
        pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
                (unsigned long)mpidr, read_cpuid_id());
 }
@@ -381,7 +375,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
         * faults in case uaccess_enable() is inadvertently called by the init
         * thread.
         */
-       init_task.thread_info.ttbr0 = __pa_symbol(reserved_pg_dir);
+       init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
 #endif
 
        if (boot_args[1] || boot_args[2] || boot_args[3]) {
index 6237486..f8192f4 100644 (file)
@@ -911,6 +911,19 @@ static void do_signal(struct pt_regs *regs)
        restore_saved_sigmask();
 }
 
+static bool cpu_affinity_invalid(struct pt_regs *regs)
+{
+       if (!compat_user_mode(regs))
+               return false;
+
+       /*
+        * We're preemptible, but a reschedule will cause us to check the
+        * affinity again.
+        */
+       return !cpumask_test_cpu(raw_smp_processor_id(),
+                                system_32bit_el0_cpumask());
+}
+
 asmlinkage void do_notify_resume(struct pt_regs *regs,
                                 unsigned long thread_flags)
 {
@@ -938,6 +951,19 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
                        if (thread_flags & _TIF_NOTIFY_RESUME) {
                                tracehook_notify_resume(regs);
                                rseq_handle_notify_resume(NULL, regs);
+
+                               /*
+                                * If we reschedule after checking the affinity
+                                * then we must ensure that TIF_NOTIFY_RESUME
+                                * is set so that we check the affinity again.
+                                * Since tracehook_notify_resume() clears the
+                                * flag, ensure that the compiler doesn't move
+                                * it after the affinity check.
+                                */
+                               barrier();
+
+                               if (cpu_affinity_invalid(regs))
+                                       force_sig(SIGKILL);
                        }
 
                        if (thread_flags & _TIF_FOREIGN_FPSTATE)
index d624479..d3d37f9 100644 (file)
@@ -7,8 +7,34 @@
 
 #include <asm/asm-offsets.h>
 #include <asm/assembler.h>
+#include <asm/thread_info.h>
+
+/*
+ * If we have SMCCC v1.3 and (as is likely) no SVE state in
+ * the registers then set the SMCCC hint bit to say there's no
+ * need to preserve it.  Do this by directly adjusting the SMCCC
+ * function value which is already stored in x0 ready to be called.
+ */
+SYM_FUNC_START(__arm_smccc_sve_check)
+
+       ldr_l   x16, smccc_has_sve_hint
+       cbz     x16, 2f
+
+       get_current_task x16
+       ldr     x16, [x16, #TSK_TI_FLAGS]
+       tbnz    x16, #TIF_FOREIGN_FPSTATE, 1f   // Any live FP state?
+       tbnz    x16, #TIF_SVE, 2f               // Does that state include SVE?
+
+1:     orr     x0, x0, ARM_SMCCC_1_3_SVE_HINT
+
+2:     ret
+SYM_FUNC_END(__arm_smccc_sve_check)
+EXPORT_SYMBOL(__arm_smccc_sve_check)
 
        .macro SMCCC instr
+alternative_if ARM64_SVE
+       bl      __arm_smccc_sve_check
+alternative_else_nop_endif
        \instr  #0
        ldr     x4, [sp]
        stp     x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
@@ -43,3 +69,60 @@ SYM_FUNC_START(__arm_smccc_hvc)
        SMCCC   hvc
 SYM_FUNC_END(__arm_smccc_hvc)
 EXPORT_SYMBOL(__arm_smccc_hvc)
+
+       .macro SMCCC_1_2 instr
+       /* Save `res` and free a GPR that won't be clobbered */
+       stp     x1, x19, [sp, #-16]!
+
+       /* Ensure `args` won't be clobbered while loading regs in next step */
+       mov     x19, x0
+
+       /* Load the registers x0 - x17 from the struct arm_smccc_1_2_regs */
+       ldp     x0, x1, [x19, #ARM_SMCCC_1_2_REGS_X0_OFFS]
+       ldp     x2, x3, [x19, #ARM_SMCCC_1_2_REGS_X2_OFFS]
+       ldp     x4, x5, [x19, #ARM_SMCCC_1_2_REGS_X4_OFFS]
+       ldp     x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS]
+       ldp     x8, x9, [x19, #ARM_SMCCC_1_2_REGS_X8_OFFS]
+       ldp     x10, x11, [x19, #ARM_SMCCC_1_2_REGS_X10_OFFS]
+       ldp     x12, x13, [x19, #ARM_SMCCC_1_2_REGS_X12_OFFS]
+       ldp     x14, x15, [x19, #ARM_SMCCC_1_2_REGS_X14_OFFS]
+       ldp     x16, x17, [x19, #ARM_SMCCC_1_2_REGS_X16_OFFS]
+
+       \instr #0
+
+       /* Load the `res` from the stack */
+       ldr     x19, [sp]
+
+       /* Store the registers x0 - x17 into the result structure */
+       stp     x0, x1, [x19, #ARM_SMCCC_1_2_REGS_X0_OFFS]
+       stp     x2, x3, [x19, #ARM_SMCCC_1_2_REGS_X2_OFFS]
+       stp     x4, x5, [x19, #ARM_SMCCC_1_2_REGS_X4_OFFS]
+       stp     x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS]
+       stp     x8, x9, [x19, #ARM_SMCCC_1_2_REGS_X8_OFFS]
+       stp     x10, x11, [x19, #ARM_SMCCC_1_2_REGS_X10_OFFS]
+       stp     x12, x13, [x19, #ARM_SMCCC_1_2_REGS_X12_OFFS]
+       stp     x14, x15, [x19, #ARM_SMCCC_1_2_REGS_X14_OFFS]
+       stp     x16, x17, [x19, #ARM_SMCCC_1_2_REGS_X16_OFFS]
+
+       /* Restore original x19 */
+       ldp     xzr, x19, [sp], #16
+       ret
+.endm
+
+/*
+ * void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args,
+ *                       struct arm_smccc_1_2_regs *res);
+ */
+SYM_FUNC_START(arm_smccc_1_2_hvc)
+       SMCCC_1_2 hvc
+SYM_FUNC_END(arm_smccc_1_2_hvc)
+EXPORT_SYMBOL(arm_smccc_1_2_hvc)
+
+/*
+ * void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args,
+ *                       struct arm_smccc_1_2_regs *res);
+ */
+SYM_FUNC_START(arm_smccc_1_2_smc)
+       SMCCC_1_2 smc
+SYM_FUNC_END(arm_smccc_1_2_smc)
+EXPORT_SYMBOL(arm_smccc_1_2_smc)
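
A minimal caller sketch for the SMCCC v1.2 helpers exported above; the function ID is illustrative and would normally come from the relevant firmware specification:

	/* Sketch: issue an SMC using the SMCCC v1.2 register convention. */
	struct arm_smccc_1_2_regs args = {
		.a0 = 0x84000000,	/* illustrative function ID */
	};
	struct arm_smccc_1_2_regs res;

	arm_smccc_1_2_smc(&args, &res);
	/* res.a0 holds the firmware's primary return value; a1-a17 carry
	 * any additional results defined by the called function. */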
index 6671000..6f6ff07 100644 (file)
@@ -120,9 +120,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
         * page tables.
         */
        secondary_data.task = idle;
-       secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
        update_cpu_boot_status(CPU_MMU_OFF);
-       __flush_dcache_area(&secondary_data, sizeof(secondary_data));
 
        /* Now bring the CPU into our world */
        ret = boot_secondary(cpu, idle);
@@ -142,8 +140,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 
        pr_crit("CPU%u: failed to come online\n", cpu);
        secondary_data.task = NULL;
-       secondary_data.stack = NULL;
-       __flush_dcache_area(&secondary_data, sizeof(secondary_data));
        status = READ_ONCE(secondary_data.status);
        if (status == CPU_MMU_OFF)
                status = READ_ONCE(__early_cpu_boot_status);
@@ -202,10 +198,7 @@ asmlinkage notrace void secondary_start_kernel(void)
        u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
        struct mm_struct *mm = &init_mm;
        const struct cpu_operations *ops;
-       unsigned int cpu;
-
-       cpu = task_cpu(current);
-       set_my_cpu_offset(per_cpu_offset(cpu));
+       unsigned int cpu = smp_processor_id();
 
        /*
         * All kernel threads share the same mm context; grab a
@@ -351,7 +344,7 @@ void __cpu_die(unsigned int cpu)
                pr_crit("CPU%u: cpu didn't die\n", cpu);
                return;
        }
-       pr_notice("CPU%u: shutdown\n", cpu);
+       pr_debug("CPU%u: shutdown\n", cpu);
 
        /*
         * Now that the dying CPU is beyond the point of no return w.r.t.
@@ -451,6 +444,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void __init smp_prepare_boot_cpu(void)
 {
+       /*
+        * The runtime per-cpu areas have been allocated by
+        * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be
+        * freed shortly, so we must move over to the runtime per-cpu area.
+        */
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
        cpuinfo_store_boot_cpu();
 
index c45a835..7e1624e 100644 (file)
@@ -36,7 +36,7 @@ static void write_pen_release(u64 val)
        unsigned long size = sizeof(secondary_holding_pen_release);
 
        secondary_holding_pen_release = val;
-       __flush_dcache_area(start, size);
+       dcache_clean_inval_poc((unsigned long)start, (unsigned long)start + size);
 }
 
 
@@ -90,8 +90,9 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
         * the boot protocol.
         */
        writeq_relaxed(pa_holding_pen, release_addr);
-       __flush_dcache_area((__force void *)release_addr,
-                           sizeof(*release_addr));
+       dcache_clean_inval_poc((__force unsigned long)release_addr,
+                           (__force unsigned long)release_addr +
+                                   sizeof(*release_addr));
 
        /*
         * Send an event to wake up the secondary CPU.
index de07147..b189de5 100644 (file)
@@ -68,13 +68,17 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
        unsigned long fp = frame->fp;
        struct stack_info info;
 
-       if (fp & 0xf)
-               return -EINVAL;
-
        if (!tsk)
                tsk = current;
 
-       if (!on_accessible_stack(tsk, fp, &info))
+       /* Final frame; nothing to unwind */
+       if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
+               return -ENOENT;
+
+       if (fp & 0x7)
+               return -EINVAL;
+
+       if (!on_accessible_stack(tsk, fp, 16, &info))
                return -EINVAL;
 
        if (test_bit(info.type, frame->stacks_done))
@@ -128,12 +132,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 
        frame->pc = ptrauth_strip_insn_pac(frame->pc);
 
-       /*
-        * This is a terminal record, so we have finished unwinding.
-        */
-       if (!frame->fp && !frame->pc)
-               return -ENOENT;
-
        return 0;
 }
 NOKPROBE_SYMBOL(unwind_frame);
index e3f72df..938ce6f 100644 (file)
@@ -7,6 +7,7 @@
 #include <asm/alternative.h>
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
+#include <asm/cpuidle.h>
 #include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
 #include <asm/exec.h>
@@ -91,6 +92,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
        int ret = 0;
        unsigned long flags;
        struct sleep_stack_data state;
+       struct arm_cpuidle_irq_context context;
 
        /* Report any MTE async fault before going to suspend */
        mte_suspend_enter();
@@ -103,12 +105,18 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
        flags = local_daif_save();
 
        /*
-        * Function graph tracer state gets incosistent when the kernel
+        * Function graph tracer state gets inconsistent when the kernel
         * calls functions that never return (aka suspend finishers) hence
         * disable graph tracing during their execution.
         */
        pause_graph_tracing();
 
+       /*
+        * Switch to using DAIF.IF instead of PMR in order to reliably
+        * resume if we're using pseudo-NMIs.
+        */
+       arm_cpuidle_save_irq_context(&context);
+
        if (__cpu_suspend_enter(&state)) {
                /* Call the suspend finisher */
                ret = fn(arg);
@@ -126,6 +134,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
                RCU_NONIDLE(__cpu_suspend_exit());
        }
 
+       arm_cpuidle_restore_irq_context(&context);
+
        unpause_graph_tracing();
 
        /*
index 265fe3e..db5159a 100644 (file)
@@ -41,7 +41,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
                        dsb(ish);
                }
 
-               ret = __flush_cache_user_range(start, start + chunk);
+               ret = caches_clean_inval_user_pou(start, start + chunk);
                if (ret)
                        return ret;
 
index a05d34f..b03e383 100644 (file)
@@ -38,6 +38,7 @@
 #include <asm/extable.h>
 #include <asm/insn.h>
 #include <asm/kprobes.h>
+#include <asm/patching.h>
 #include <asm/traps.h>
 #include <asm/smp.h>
 #include <asm/stack_pointer.h>
 #include <asm/system_misc.h>
 #include <asm/sysreg.h>
 
-static const char *handler[] = {
-       "Synchronous Abort",
-       "IRQ",
-       "FIQ",
-       "Error"
+static bool __kprobes __check_eq(unsigned long pstate)
+{
+       return (pstate & PSR_Z_BIT) != 0;
+}
+
+static bool __kprobes __check_ne(unsigned long pstate)
+{
+       return (pstate & PSR_Z_BIT) == 0;
+}
+
+static bool __kprobes __check_cs(unsigned long pstate)
+{
+       return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_cc(unsigned long pstate)
+{
+       return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_mi(unsigned long pstate)
+{
+       return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_pl(unsigned long pstate)
+{
+       return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_vs(unsigned long pstate)
+{
+       return (pstate & PSR_V_BIT) != 0;
+}
+
+static bool __kprobes __check_vc(unsigned long pstate)
+{
+       return (pstate & PSR_V_BIT) == 0;
+}
+
+static bool __kprobes __check_hi(unsigned long pstate)
+{
+       pstate &= ~(pstate >> 1);       /* PSR_C_BIT &= ~PSR_Z_BIT */
+       return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_ls(unsigned long pstate)
+{
+       pstate &= ~(pstate >> 1);       /* PSR_C_BIT &= ~PSR_Z_BIT */
+       return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_ge(unsigned long pstate)
+{
+       pstate ^= (pstate << 3);        /* PSR_N_BIT ^= PSR_V_BIT */
+       return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_lt(unsigned long pstate)
+{
+       pstate ^= (pstate << 3);        /* PSR_N_BIT ^= PSR_V_BIT */
+       return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_gt(unsigned long pstate)
+{
+       /*PSR_N_BIT ^= PSR_V_BIT */
+       unsigned long temp = pstate ^ (pstate << 3);
+
+       temp |= (pstate << 1);  /*PSR_N_BIT |= PSR_Z_BIT */
+       return (temp & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_le(unsigned long pstate)
+{
+       /*PSR_N_BIT ^= PSR_V_BIT */
+       unsigned long temp = pstate ^ (pstate << 3);
+
+       temp |= (pstate << 1);  /*PSR_N_BIT |= PSR_Z_BIT */
+       return (temp & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_al(unsigned long pstate)
+{
+       return true;
+}
+
+/*
+ * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
+ * it behaves identically to 0b1110 ("al").
+ */
+pstate_check_t * const aarch32_opcode_cond_checks[16] = {
+       __check_eq, __check_ne, __check_cs, __check_cc,
+       __check_mi, __check_pl, __check_vs, __check_vc,
+       __check_hi, __check_ls, __check_ge, __check_lt,
+       __check_gt, __check_le, __check_al, __check_al
 };
 
 int show_unhandled_signals = 0;
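
Note: this table of PSTATE condition checks moves into traps.c (its old copy in arch/arm64/kernel/insn.c is deleted further down, as part of relocating insn.c to lib/), keeping the AArch32 emulation support in the kernel proper. Illustrative use, close to what the kernel's condition-check helper does:

	static bool aarch32_cond_passed(u32 insn, unsigned long pstate)
	{
		u32 cond = (insn >> 28) & 0xf;	/* opcode bits [31:28] */

		return aarch32_opcode_cond_checks[cond](pstate);
	}
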
@@ -751,27 +843,8 @@ const char *esr_get_class_string(u32 esr)
 }
 
 /*
- * bad_mode handles the impossible case in the exception vector. This is always
- * fatal.
- */
-asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
-{
-       arm64_enter_nmi(regs);
-
-       console_verbose();
-
-       pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
-               handler[reason], smp_processor_id(), esr,
-               esr_get_class_string(esr));
-
-       __show_regs(regs);
-       local_daif_mask();
-       panic("bad mode");
-}
-
-/*
  * bad_el0_sync handles unexpected, but potentially recoverable synchronous
- * exceptions taken from EL0. Unlike bad_mode, this returns.
+ * exceptions taken from EL0.
  */
 void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
 {
@@ -789,15 +862,11 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
 DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
        __aligned(16);
 
-asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
+void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far)
 {
        unsigned long tsk_stk = (unsigned long)current->stack;
        unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
        unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
-       unsigned int esr = read_sysreg(esr_el1);
-       unsigned long far = read_sysreg(far_el1);
-
-       arm64_enter_nmi(regs);
 
        console_verbose();
        pr_emerg("Insufficient stack space to handle exception!");
@@ -870,15 +939,11 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
        }
 }
 
-asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
+void do_serror(struct pt_regs *regs, unsigned int esr)
 {
-       arm64_enter_nmi(regs);
-
        /* non-RAS errors are not containable */
        if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
                arm64_serror_panic(regs, esr);
-
-       arm64_exit_nmi(regs);
 }
 
 /* GENERIC_BUG traps */
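
Note: bad_mode() disappears, and do_serror()/panic_bad_stack() drop their asmlinkage/noinstr markings, ESR/FAR reads and NMI bookkeeping, because the exception entry points are now C functions that do that work before calling in. Hypothetical shape of such a wrapper (name and placement illustrative only; the real ones live in the entry code):

	static void noinstr el1_serror_entry(struct pt_regs *regs)
	{
		unsigned long esr = read_sysreg(esr_el1);

		arm64_enter_nmi(regs);
		do_serror(regs, esr);
		arm64_exit_nmi(regs);
	}
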
index e720148..06f8e2d 100644
@@ -692,6 +692,15 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
        }
 }
 
+static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
+{
+       if (likely(!vcpu_mode_is_32bit(vcpu)))
+               return false;
+
+       return !system_supports_32bit_el0() ||
+               static_branch_unlikely(&arm64_mismatched_32bit_el0);
+}
+
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu:      The VCPU pointer
@@ -877,7 +886,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 * with the asymmetric AArch32 case), return to userspace with
                 * a fatal error.
                 */
-               if (!system_supports_32bit_el0() && vcpu_mode_is_32bit(vcpu)) {
+               if (vcpu_mode_is_bad_32bit(vcpu)) {
                        /*
                         * As we have caught the guest red-handed, decide that
                         * it isn't fit for purpose anymore by making the vcpu
@@ -1078,7 +1087,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
                if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
                        stage2_unmap_vm(vcpu->kvm);
                else
-                       __flush_icache_all();
+                       icache_inval_all_pou();
        }
 
        vcpu_reset_hcr(vcpu);
index 36cef69..958734f 100644
@@ -7,7 +7,7 @@
 #include <asm/assembler.h>
 #include <asm/alternative.h>
 
-SYM_FUNC_START_PI(__flush_dcache_area)
+SYM_FUNC_START_PI(dcache_clean_inval_poc)
        dcache_by_line_op civac, sy, x0, x1, x2, x3
        ret
-SYM_FUNC_END_PI(__flush_dcache_area)
+SYM_FUNC_END_PI(dcache_clean_inval_poc)
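
Note: the cache maintenance renames follow a <cache>_<operation>_<point> scheme (__flush_dcache_area becomes dcache_clean_inval_poc, __flush_icache_all becomes icache_inval_all_pou, __flush_cache_user_range becomes caches_clean_inval_user_pou), and the by-range d-cache helpers now take (start, end) virtual addresses instead of a base and size, as the hunks below show. Prototypes as inferred from the call sites in this series (return types are assumptions):

	void dcache_clean_inval_poc(unsigned long start, unsigned long end);
	void icache_inval_all_pou(void);
	int caches_clean_inval_user_pou(unsigned long start, unsigned long end);
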
index a3d3a27..545db98 100644
@@ -134,7 +134,8 @@ static void update_nvhe_init_params(void)
        for (i = 0; i < hyp_nr_cpus; i++) {
                params = per_cpu_ptr(&kvm_init_params, i);
                params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
-               __flush_dcache_area(params, sizeof(*params));
+               dcache_clean_inval_poc((unsigned long)params,
+                                   (unsigned long)params + sizeof(*params));
        }
 }
 
index 83dc3b2..38ed0f6 100644
@@ -104,7 +104,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
         * you should be running with VHE enabled.
         */
        if (icache_is_vpipt())
-               __flush_icache_all();
+               icache_inval_all_pou();
 
        __tlb_switch_to_host(&cxt);
 }
index c37c1dc..e9ad7fb 100644
@@ -839,8 +839,11 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
        stage2_put_pte(ptep, mmu, addr, level, mm_ops);
 
        if (need_flush) {
-               __flush_dcache_area(kvm_pte_follow(pte, mm_ops),
-                                   kvm_granule_size(level));
+               kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);
+
+               dcache_clean_inval_poc((unsigned long)pte_follow,
+                                   (unsigned long)pte_follow +
+                                           kvm_granule_size(level));
        }
 
        if (childp)
@@ -988,11 +991,15 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
        struct kvm_pgtable *pgt = arg;
        struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
        kvm_pte_t pte = *ptep;
+       kvm_pte_t *pte_follow;
 
        if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
                return 0;
 
-       __flush_dcache_area(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level));
+       pte_follow = kvm_pte_follow(pte, mm_ops);
+       dcache_clean_inval_poc((unsigned long)pte_follow,
+                           (unsigned long)pte_follow +
+                                   kvm_granule_size(level));
        return 0;
 }
 
index d31e116..6dd56a4 100644
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 lib-y          := clear_user.o delay.o copy_from_user.o                \
                   copy_to_user.o copy_in_user.o copy_page.o            \
-                  clear_page.o csum.o memchr.o memcpy.o memmove.o      \
+                  clear_page.o csum.o insn.o memchr.o memcpy.o         \
                   memset.o memcmp.o strcmp.o strncmp.o strlen.o        \
                   strnlen.o strchr.o strrchr.o tishift.o
 
@@ -18,3 +18,5 @@ obj-$(CONFIG_CRC32) += crc32.o
 obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
 
 obj-$(CONFIG_ARM64_MTE) += mte.o
+
+obj-$(CONFIG_KASAN_SW_TAGS) += kasan_sw_tags.o
index af9afcb..a7efb2a 100644
@@ -1,12 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Based on arch/arm/lib/clear_user.S
- *
- * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2021 Arm Ltd.
  */
-#include <linux/linkage.h>
 
-#include <asm/asm-uaccess.h>
+#include <linux/linkage.h>
 #include <asm/assembler.h>
 
        .text
  *
  * Alignment fixed up by hardware.
  */
+
+       .p2align 4
+       // Alignment is for the loop, but since the prologue (including BTI)
+       // is also 16 bytes we can keep any padding outside the function
 SYM_FUNC_START(__arch_clear_user)
-       mov     x2, x1                  // save the size for fixup return
+       add     x2, x0, x1
        subs    x1, x1, #8
        b.mi    2f
 1:
-user_ldst 9f, sttr, xzr, x0, 8
+USER(9f, sttr  xzr, [x0])
+       add     x0, x0, #8
        subs    x1, x1, #8
-       b.pl    1b
-2:     adds    x1, x1, #4
-       b.mi    3f
-user_ldst 9f, sttr, wzr, x0, 4
-       sub     x1, x1, #4
-3:     adds    x1, x1, #2
-       b.mi    4f
-user_ldst 9f, sttrh, wzr, x0, 2
-       sub     x1, x1, #2
-4:     adds    x1, x1, #1
-       b.mi    5f
-user_ldst 9f, sttrb, wzr, x0, 0
+       b.hi    1b
+USER(9f, sttr  xzr, [x2, #-8])
+       mov     x0, #0
+       ret
+
+2:     tbz     x1, #2, 3f
+USER(9f, sttr  wzr, [x0])
+USER(8f, sttr  wzr, [x2, #-4])
+       mov     x0, #0
+       ret
+
+3:     tbz     x1, #1, 4f
+USER(9f, sttrh wzr, [x0])
+4:     tbz     x1, #0, 5f
+USER(7f, sttrb wzr, [x2, #-1])
 5:     mov     x0, #0
        ret
 SYM_FUNC_END(__arch_clear_user)
@@ -45,6 +50,8 @@ EXPORT_SYMBOL(__arch_clear_user)
 
        .section .fixup,"ax"
        .align  2
-9:     mov     x0, x2                  // return the original size
+7:     sub     x0, x2, #5      // Adjust for faulting on the final byte...
+8:     add     x0, x0, #4      // ...or the second word of the 4-7 byte case
+9:     sub     x0, x2, x0
        ret
        .previous
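
Note: the rewritten __arch_clear_user() zeroes eight bytes per iteration with an overlapping tail store, and the fixup labels 7/8/9 reconstruct the usual return value (number of bytes left un-zeroed) from the end pointer kept in x2. Typical caller, unchanged by this rewrite:

	static int zero_to_user(void __user *ubuf, unsigned long len)
	{
		/* clear_user() returns the number of bytes not zeroed. */
		return clear_user(ubuf, len) ? -EFAULT : 0;
	}
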
similarity index 86%
rename from arch/arm64/kernel/insn.c
rename to arch/arm64/lib/insn.c
index 6c0de2f..b506a4b 100644
@@ -7,21 +7,14 @@
  */
 #include <linux/bitops.h>
 #include <linux/bug.h>
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
-#include <linux/stop_machine.h>
+#include <linux/printk.h>
+#include <linux/sizes.h>
 #include <linux/types.h>
-#include <linux/uaccess.h>
 
-#include <asm/cacheflush.h>
 #include <asm/debug-monitors.h>
-#include <asm/fixmap.h>
+#include <asm/errno.h>
 #include <asm/insn.h>
 #include <asm/kprobes.h>
-#include <asm/sections.h>
 
 #define AARCH64_INSN_SF_BIT    BIT(31)
 #define AARCH64_INSN_N_BIT     BIT(22)
@@ -30,7 +23,7 @@
 static const int aarch64_insn_encoding_class[] = {
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_UNKNOWN,
-       AARCH64_INSN_CLS_UNKNOWN,
+       AARCH64_INSN_CLS_SVE,
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_LDST,
        AARCH64_INSN_CLS_DP_REG,
@@ -83,81 +76,6 @@ bool aarch64_insn_is_branch_imm(u32 insn)
                aarch64_insn_is_bcond(insn));
 }
 
-static DEFINE_RAW_SPINLOCK(patch_lock);
-
-static bool is_exit_text(unsigned long addr)
-{
-       /* discarded with init text/data */
-       return system_state < SYSTEM_RUNNING &&
-               addr >= (unsigned long)__exittext_begin &&
-               addr < (unsigned long)__exittext_end;
-}
-
-static bool is_image_text(unsigned long addr)
-{
-       return core_kernel_text(addr) || is_exit_text(addr);
-}
-
-static void __kprobes *patch_map(void *addr, int fixmap)
-{
-       unsigned long uintaddr = (uintptr_t) addr;
-       bool image = is_image_text(uintaddr);
-       struct page *page;
-
-       if (image)
-               page = phys_to_page(__pa_symbol(addr));
-       else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
-               page = vmalloc_to_page(addr);
-       else
-               return addr;
-
-       BUG_ON(!page);
-       return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
-                       (uintaddr & ~PAGE_MASK));
-}
-
-static void __kprobes patch_unmap(int fixmap)
-{
-       clear_fixmap(fixmap);
-}
-/*
- * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
- * little-endian.
- */
-int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
-{
-       int ret;
-       __le32 val;
-
-       ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
-       if (!ret)
-               *insnp = le32_to_cpu(val);
-
-       return ret;
-}
-
-static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
-{
-       void *waddr = addr;
-       unsigned long flags = 0;
-       int ret;
-
-       raw_spin_lock_irqsave(&patch_lock, flags);
-       waddr = patch_map(addr, FIX_TEXT_POKE0);
-
-       ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
-
-       patch_unmap(FIX_TEXT_POKE0);
-       raw_spin_unlock_irqrestore(&patch_lock, flags);
-
-       return ret;
-}
-
-int __kprobes aarch64_insn_write(void *addr, u32 insn)
-{
-       return __aarch64_insn_write(addr, cpu_to_le32(insn));
-}
-
 bool __kprobes aarch64_insn_uses_literal(u32 insn)
 {
        /* ldr/ldrsw (literal), prfm */
@@ -187,67 +105,6 @@ bool __kprobes aarch64_insn_is_branch(u32 insn)
                aarch64_insn_is_bcond(insn);
 }
 
-int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
-{
-       u32 *tp = addr;
-       int ret;
-
-       /* A64 instructions must be word aligned */
-       if ((uintptr_t)tp & 0x3)
-               return -EINVAL;
-
-       ret = aarch64_insn_write(tp, insn);
-       if (ret == 0)
-               __flush_icache_range((uintptr_t)tp,
-                                    (uintptr_t)tp + AARCH64_INSN_SIZE);
-
-       return ret;
-}
-
-struct aarch64_insn_patch {
-       void            **text_addrs;
-       u32             *new_insns;
-       int             insn_cnt;
-       atomic_t        cpu_count;
-};
-
-static int __kprobes aarch64_insn_patch_text_cb(void *arg)
-{
-       int i, ret = 0;
-       struct aarch64_insn_patch *pp = arg;
-
-       /* The first CPU becomes master */
-       if (atomic_inc_return(&pp->cpu_count) == 1) {
-               for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
-                       ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
-                                                            pp->new_insns[i]);
-               /* Notify other processors with an additional increment. */
-               atomic_inc(&pp->cpu_count);
-       } else {
-               while (atomic_read(&pp->cpu_count) <= num_online_cpus())
-                       cpu_relax();
-               isb();
-       }
-
-       return ret;
-}
-
-int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
-{
-       struct aarch64_insn_patch patch = {
-               .text_addrs = addrs,
-               .new_insns = insns,
-               .insn_cnt = cnt,
-               .cpu_count = ATOMIC_INIT(0),
-       };
-
-       if (cnt <= 0)
-               return -EINVAL;
-
-       return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
-                                      cpu_online_mask);
-}
-
 static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
                                                u32 *maskp, int *shiftp)
 {
@@ -1432,104 +1289,6 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn)
        return insn & CRM_MASK;
 }
 
-static bool __kprobes __check_eq(unsigned long pstate)
-{
-       return (pstate & PSR_Z_BIT) != 0;
-}
-
-static bool __kprobes __check_ne(unsigned long pstate)
-{
-       return (pstate & PSR_Z_BIT) == 0;
-}
-
-static bool __kprobes __check_cs(unsigned long pstate)
-{
-       return (pstate & PSR_C_BIT) != 0;
-}
-
-static bool __kprobes __check_cc(unsigned long pstate)
-{
-       return (pstate & PSR_C_BIT) == 0;
-}
-
-static bool __kprobes __check_mi(unsigned long pstate)
-{
-       return (pstate & PSR_N_BIT) != 0;
-}
-
-static bool __kprobes __check_pl(unsigned long pstate)
-{
-       return (pstate & PSR_N_BIT) == 0;
-}
-
-static bool __kprobes __check_vs(unsigned long pstate)
-{
-       return (pstate & PSR_V_BIT) != 0;
-}
-
-static bool __kprobes __check_vc(unsigned long pstate)
-{
-       return (pstate & PSR_V_BIT) == 0;
-}
-
-static bool __kprobes __check_hi(unsigned long pstate)
-{
-       pstate &= ~(pstate >> 1);       /* PSR_C_BIT &= ~PSR_Z_BIT */
-       return (pstate & PSR_C_BIT) != 0;
-}
-
-static bool __kprobes __check_ls(unsigned long pstate)
-{
-       pstate &= ~(pstate >> 1);       /* PSR_C_BIT &= ~PSR_Z_BIT */
-       return (pstate & PSR_C_BIT) == 0;
-}
-
-static bool __kprobes __check_ge(unsigned long pstate)
-{
-       pstate ^= (pstate << 3);        /* PSR_N_BIT ^= PSR_V_BIT */
-       return (pstate & PSR_N_BIT) == 0;
-}
-
-static bool __kprobes __check_lt(unsigned long pstate)
-{
-       pstate ^= (pstate << 3);        /* PSR_N_BIT ^= PSR_V_BIT */
-       return (pstate & PSR_N_BIT) != 0;
-}
-
-static bool __kprobes __check_gt(unsigned long pstate)
-{
-       /*PSR_N_BIT ^= PSR_V_BIT */
-       unsigned long temp = pstate ^ (pstate << 3);
-
-       temp |= (pstate << 1);  /*PSR_N_BIT |= PSR_Z_BIT */
-       return (temp & PSR_N_BIT) == 0;
-}
-
-static bool __kprobes __check_le(unsigned long pstate)
-{
-       /*PSR_N_BIT ^= PSR_V_BIT */
-       unsigned long temp = pstate ^ (pstate << 3);
-
-       temp |= (pstate << 1);  /*PSR_N_BIT |= PSR_Z_BIT */
-       return (temp & PSR_N_BIT) != 0;
-}
-
-static bool __kprobes __check_al(unsigned long pstate)
-{
-       return true;
-}
-
-/*
- * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
- * it behaves identically to 0b1110 ("al").
- */
-pstate_check_t * const aarch32_opcode_cond_checks[16] = {
-       __check_eq, __check_ne, __check_cs, __check_cc,
-       __check_mi, __check_pl, __check_vs, __check_vc,
-       __check_hi, __check_ls, __check_ge, __check_lt,
-       __check_gt, __check_le, __check_al, __check_al
-};
-
 static bool range_of_ones(u64 val)
 {
        /* Doesn't handle full ones or full zeroes */
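
Note: with the move to lib/, insn.c is reduced to pure encode/decode: the text-patching helpers removed here (aarch64_insn_read/write and aarch64_insn_patch_text*) live on elsewhere in the kernel, and encoding class 2 (op0 bits [28:25] == 0b0010) is now reported as SVE rather than unknown. Sketch of how the class table above is indexed, equivalent to the kernel's aarch64_get_insn_class():

	static enum aarch64_insn_encoding_class insn_class(u32 insn)
	{
		return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
	}
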
diff --git a/arch/arm64/lib/kasan_sw_tags.S b/arch/arm64/lib/kasan_sw_tags.S
new file mode 100644
index 0000000..5b04464
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google LLC
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * Report a tag mismatch detected by tag-based KASAN.
+ *
+ * A compiler-generated thunk calls this with a non-AAPCS calling
+ * convention. Upon entry to this function, registers are as follows:
+ *
+ * x0:         fault address (see below for restore)
+ * x1:         fault description (see below for restore)
+ * x2 to x15:  callee-saved
+ * x16 to x17: safe to clobber
+ * x18 to x30: callee-saved
+ * sp:         pre-decremented by 256 bytes (see below for restore)
+ *
+ * The caller has decremented the SP by 256 bytes, and created a
+ * structure on the stack as follows:
+ *
+ * sp + 0..15:    x0 and x1 to be restored
+ * sp + 16..231:  free for use
+ * sp + 232..247: x29 and x30 (same as in GPRs)
+ * sp + 248..255: free for use
+ *
+ * Note that this is not a struct pt_regs.
+ *
+ * To call a regular AAPCS function we must save x2 to x15 (which we can
+ * store in the gaps), and create a frame record (for which we can use
+ * x29 and x30 spilled by the caller as those match the GPRs).
+ *
+ * The caller expects x0 and x1 to be restored from the structure, and
+ * for the structure to be removed from the stack (i.e. the SP must be
+ * incremented by 256 prior to return).
+ */
+SYM_CODE_START(__hwasan_tag_mismatch)
+#ifdef BTI_C
+       BTI_C
+#endif
+       add     x29, sp, #232
+       stp     x2, x3, [sp, #8 * 2]
+       stp     x4, x5, [sp, #8 * 4]
+       stp     x6, x7, [sp, #8 * 6]
+       stp     x8, x9, [sp, #8 * 8]
+       stp     x10, x11, [sp, #8 * 10]
+       stp     x12, x13, [sp, #8 * 12]
+       stp     x14, x15, [sp, #8 * 14]
+#ifndef CONFIG_SHADOW_CALL_STACK
+       str     x18, [sp, #8 * 18]
+#endif
+
+       mov     x2, x30
+       bl      kasan_tag_mismatch
+
+       ldp     x0, x1, [sp]
+       ldp     x2, x3, [sp, #8 * 2]
+       ldp     x4, x5, [sp, #8 * 4]
+       ldp     x6, x7, [sp, #8 * 6]
+       ldp     x8, x9, [sp, #8 * 8]
+       ldp     x10, x11, [sp, #8 * 10]
+       ldp     x12, x13, [sp, #8 * 12]
+       ldp     x14, x15, [sp, #8 * 14]
+#ifndef CONFIG_SHADOW_CALL_STACK
+       ldr     x18, [sp, #8 * 18]
+#endif
+       ldp     x29, x30, [sp, #8 * 29]
+
+       /* remove the structure from the stack */
+       add     sp, sp, #256
+       ret
+SYM_CODE_END(__hwasan_tag_mismatch)
+EXPORT_SYMBOL(__hwasan_tag_mismatch)
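
Note: the trampoline above saves only the registers the compiler-generated thunk does not preserve, builds a frame record from the x29/x30 pair the caller spilled at sp + 232, and then calls the C report function. The three-argument prototype below is an assumption based on how x0/x1/x2 are set up here:

	void kasan_tag_mismatch(unsigned long addr, unsigned long access_info,
				unsigned long ret_ip);
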
index edf6b97..7c2276f 100644
@@ -1,9 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Based on arch/arm/lib/memchr.S
- *
- * Copyright (C) 1995-2000 Russell King
- * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2021 Arm Ltd.
  */
 
 #include <linux/linkage.h>
  * Returns:
  *     x0 - address of first occurrence of 'c' or 0
  */
+
+#define L(label) .L ## label
+
+#define REP8_01 0x0101010101010101
+#define REP8_7f 0x7f7f7f7f7f7f7f7f
+
+#define srcin          x0
+#define chrin          w1
+#define cntin          x2
+
+#define result         x0
+
+#define wordcnt                x3
+#define rep01          x4
+#define repchr         x5
+#define cur_word       x6
+#define cur_byte       w6
+#define tmp            x7
+#define tmp2           x8
+
+       .p2align 4
+       nop
 SYM_FUNC_START_WEAK_PI(memchr)
-       and     w1, w1, #0xff
-1:     subs    x2, x2, #1
-       b.mi    2f
-       ldrb    w3, [x0], #1
-       cmp     w3, w1
-       b.ne    1b
-       sub     x0, x0, #1
+       and     chrin, chrin, #0xff
+       lsr     wordcnt, cntin, #3
+       cbz     wordcnt, L(byte_loop)
+       mov     rep01, #REP8_01
+       mul     repchr, x1, rep01
+       and     cntin, cntin, #7
+L(word_loop):
+       ldr     cur_word, [srcin], #8
+       sub     wordcnt, wordcnt, #1
+       eor     cur_word, cur_word, repchr
+       sub     tmp, cur_word, rep01
+       orr     tmp2, cur_word, #REP8_7f
+       bics    tmp, tmp, tmp2
+       b.ne    L(found_word)
+       cbnz    wordcnt, L(word_loop)
+L(byte_loop):
+       cbz     cntin, L(not_found)
+       ldrb    cur_byte, [srcin], #1
+       sub     cntin, cntin, #1
+       cmp     cur_byte, chrin
+       b.ne    L(byte_loop)
+       sub     srcin, srcin, #1
+       ret
+L(found_word):
+CPU_LE(        rev     tmp, tmp)
+       clz     tmp, tmp
+       sub     tmp, tmp, #64
+       add     result, srcin, tmp, asr #3
        ret
-2:     mov     x0, #0
+L(not_found):
+       mov     result, #0
        ret
 SYM_FUNC_END_PI(memchr)
 EXPORT_SYMBOL_NOKASAN(memchr)
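
Note: the new memchr works a word at a time: each 8-byte load is XORed with the target byte repeated across the word, so a matching byte becomes zero, and the sub/orr/bics sequence is the classic has-zero-byte test. A C model of that test (illustrative only):

	#define REP8_01 0x0101010101010101ULL
	#define REP8_7f 0x7f7f7f7f7f7f7f7fULL

	static int word_has_byte(unsigned long long word, unsigned char c)
	{
		unsigned long long x = word ^ (REP8_01 * c);	/* match => zero byte */

		return ((x - REP8_01) & ~(x | REP8_7f)) != 0;
	}
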
index c0671e7..7d95638 100644
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2013-2021, Arm Limited.
  *
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/e823e3abf5f89ecb/string/aarch64/memcmp.S
  */
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
-/*
-* compare memory areas(when two memory areas' offset are different,
-* alignment handled by the hardware)
-*
-* Parameters:
-*  x0 - const memory area 1 pointer
-*  x1 - const memory area 2 pointer
-*  x2 - the maximal compare byte length
-* Returns:
-*  x0 - a compare result, maybe less than, equal to, or greater than ZERO
-*/
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64, unaligned accesses.
+ */
+
+#define L(label) .L ## label
 
 /* Parameters and result.  */
-src1           .req    x0
-src2           .req    x1
-limit          .req    x2
-result         .req    x0
+#define src1           x0
+#define src2           x1
+#define limit          x2
+#define result         w0
 
 /* Internal variables.  */
-data1          .req    x3
-data1w         .req    w3
-data2          .req    x4
-data2w         .req    w4
-has_nul                .req    x5
-diff           .req    x6
-endloop                .req    x7
-tmp1           .req    x8
-tmp2           .req    x9
-tmp3           .req    x10
-pos            .req    x11
-limit_wd       .req    x12
-mask           .req    x13
+#define data1          x3
+#define data1w         w3
+#define data1h         x4
+#define data2          x5
+#define data2w         w5
+#define data2h         x6
+#define tmp1           x7
+#define tmp2           x8
 
 SYM_FUNC_START_WEAK_PI(memcmp)
-       cbz     limit, .Lret0
-       eor     tmp1, src1, src2
-       tst     tmp1, #7
-       b.ne    .Lmisaligned8
-       ands    tmp1, src1, #7
-       b.ne    .Lmutual_align
-       sub     limit_wd, limit, #1 /* limit != 0, so no underflow.  */
-       lsr     limit_wd, limit_wd, #3 /* Convert to Dwords.  */
-       /*
-       * The input source addresses are at alignment boundary.
-       * Directly compare eight bytes each time.
-       */
-.Lloop_aligned:
-       ldr     data1, [src1], #8
-       ldr     data2, [src2], #8
-.Lstart_realigned:
-       subs    limit_wd, limit_wd, #1
-       eor     diff, data1, data2      /* Non-zero if differences found.  */
-       csinv   endloop, diff, xzr, cs  /* Last Dword or differences.  */
-       cbz     endloop, .Lloop_aligned
-
-       /* Not reached the limit, must have found a diff.  */
-       tbz     limit_wd, #63, .Lnot_limit
-
-       /* Limit % 8 == 0 => the diff is in the last 8 bytes. */
-       ands    limit, limit, #7
-       b.eq    .Lnot_limit
-       /*
-       * The remained bytes less than 8. It is needed to extract valid data
-       * from last eight bytes of the intended memory range.
-       */
-       lsl     limit, limit, #3        /* bytes-> bits.  */
-       mov     mask, #~0
-CPU_BE( lsr    mask, mask, limit )
-CPU_LE( lsl    mask, mask, limit )
-       bic     data1, data1, mask
-       bic     data2, data2, mask
-
-       orr     diff, diff, mask
-       b       .Lnot_limit
-
-.Lmutual_align:
-       /*
-       * Sources are mutually aligned, but are not currently at an
-       * alignment boundary. Round down the addresses and then mask off
-       * the bytes that precede the start point.
-       */
-       bic     src1, src1, #7
-       bic     src2, src2, #7
-       ldr     data1, [src1], #8
-       ldr     data2, [src2], #8
-       /*
-       * We can not add limit with alignment offset(tmp1) here. Since the
-       * addition probably make the limit overflown.
-       */
-       sub     limit_wd, limit, #1/*limit != 0, so no underflow.*/
-       and     tmp3, limit_wd, #7
-       lsr     limit_wd, limit_wd, #3
-       add     tmp3, tmp3, tmp1
-       add     limit_wd, limit_wd, tmp3, lsr #3
-       add     limit, limit, tmp1/* Adjust the limit for the extra.  */
-
-       lsl     tmp1, tmp1, #3/* Bytes beyond alignment -> bits.*/
-       neg     tmp1, tmp1/* Bits to alignment -64.  */
-       mov     tmp2, #~0
-       /*mask off the non-intended bytes before the start address.*/
-CPU_BE( lsl    tmp2, tmp2, tmp1 )/*Big-endian.Early bytes are at MSB*/
-       /* Little-endian.  Early bytes are at LSB.  */
-CPU_LE( lsr    tmp2, tmp2, tmp1 )
-
-       orr     data1, data1, tmp2
-       orr     data2, data2, tmp2
-       b       .Lstart_realigned
-
-       /*src1 and src2 have different alignment offset.*/
-.Lmisaligned8:
-       cmp     limit, #8
-       b.lo    .Ltiny8proc /*limit < 8: compare byte by byte*/
-
-       and     tmp1, src1, #7
-       neg     tmp1, tmp1
-       add     tmp1, tmp1, #8/*valid length in the first 8 bytes of src1*/
-       and     tmp2, src2, #7
-       neg     tmp2, tmp2
-       add     tmp2, tmp2, #8/*valid length in the first 8 bytes of src2*/
-       subs    tmp3, tmp1, tmp2
-       csel    pos, tmp1, tmp2, hi /*Choose the maximum.*/
-
-       sub     limit, limit, pos
-       /*compare the proceeding bytes in the first 8 byte segment.*/
-.Ltinycmp:
-       ldrb    data1w, [src1], #1
-       ldrb    data2w, [src2], #1
-       subs    pos, pos, #1
-       ccmp    data1w, data2w, #0, ne  /* NZCV = 0b0000.  */
-       b.eq    .Ltinycmp
-       cbnz    pos, 1f /*diff occurred before the last byte.*/
-       cmp     data1w, data2w
-       b.eq    .Lstart_align
-1:
-       sub     result, data1, data2
+       subs    limit, limit, 8
+       b.lo    L(less8)
+
+       ldr     data1, [src1], 8
+       ldr     data2, [src2], 8
+       cmp     data1, data2
+       b.ne    L(return)
+
+       subs    limit, limit, 8
+       b.gt    L(more16)
+
+       ldr     data1, [src1, limit]
+       ldr     data2, [src2, limit]
+       b       L(return)
+
+L(more16):
+       ldr     data1, [src1], 8
+       ldr     data2, [src2], 8
+       cmp     data1, data2
+       bne     L(return)
+
+       /* Jump directly to comparing the last 16 bytes for 32 byte (or less)
+          strings.  */
+       subs    limit, limit, 16
+       b.ls    L(last_bytes)
+
+       /* We overlap loads between 0-32 bytes at either side of SRC1 when we
+          try to align, so limit it only to strings larger than 128 bytes.  */
+       cmp     limit, 96
+       b.ls    L(loop16)
+
+       /* Align src1 and adjust src2 with bytes not yet done.  */
+       and     tmp1, src1, 15
+       add     limit, limit, tmp1
+       sub     src1, src1, tmp1
+       sub     src2, src2, tmp1
+
+       /* Loop performing 16 bytes per iteration using aligned src1.
+          Limit is pre-decremented by 16 and must be larger than zero.
+          Exit if <= 16 bytes left to do or if the data is not equal.  */
+       .p2align 4
+L(loop16):
+       ldp     data1, data1h, [src1], 16
+       ldp     data2, data2h, [src2], 16
+       subs    limit, limit, 16
+       ccmp    data1, data2, 0, hi
+       ccmp    data1h, data2h, 0, eq
+       b.eq    L(loop16)
+
+       cmp     data1, data2
+       bne     L(return)
+       mov     data1, data1h
+       mov     data2, data2h
+       cmp     data1, data2
+       bne     L(return)
+
+       /* Compare last 1-16 bytes using unaligned access.  */
+L(last_bytes):
+       add     src1, src1, limit
+       add     src2, src2, limit
+       ldp     data1, data1h, [src1]
+       ldp     data2, data2h, [src2]
+       cmp     data1, data2
+       bne     L(return)
+       mov     data1, data1h
+       mov     data2, data2h
+       cmp     data1, data2
+
+       /* Compare data bytes and set return value to 0, -1 or 1.  */
+L(return):
+#ifndef __AARCH64EB__
+       rev     data1, data1
+       rev     data2, data2
+#endif
+       cmp     data1, data2
+L(ret_eq):
+       cset    result, ne
+       cneg    result, result, lo
        ret
 
-.Lstart_align:
-       lsr     limit_wd, limit, #3
-       cbz     limit_wd, .Lremain8
-
-       ands    xzr, src1, #7
-       b.eq    .Lrecal_offset
-       /*process more leading bytes to make src1 aligned...*/
-       add     src1, src1, tmp3 /*backwards src1 to alignment boundary*/
-       add     src2, src2, tmp3
-       sub     limit, limit, tmp3
-       lsr     limit_wd, limit, #3
-       cbz     limit_wd, .Lremain8
-       /*load 8 bytes from aligned SRC1..*/
-       ldr     data1, [src1], #8
-       ldr     data2, [src2], #8
-
-       subs    limit_wd, limit_wd, #1
-       eor     diff, data1, data2  /*Non-zero if differences found.*/
-       csinv   endloop, diff, xzr, ne
-       cbnz    endloop, .Lunequal_proc
-       /*How far is the current SRC2 from the alignment boundary...*/
-       and     tmp3, tmp3, #7
-
-.Lrecal_offset:/*src1 is aligned now..*/
-       neg     pos, tmp3
-.Lloopcmp_proc:
-       /*
-       * Divide the eight bytes into two parts. First,backwards the src2
-       * to an alignment boundary,load eight bytes and compare from
-       * the SRC2 alignment boundary. If all 8 bytes are equal,then start
-       * the second part's comparison. Otherwise finish the comparison.
-       * This special handle can garantee all the accesses are in the
-       * thread/task space in avoid to overrange access.
-       */
-       ldr     data1, [src1,pos]
-       ldr     data2, [src2,pos]
-       eor     diff, data1, data2  /* Non-zero if differences found.  */
-       cbnz    diff, .Lnot_limit
-
-       /*The second part process*/
-       ldr     data1, [src1], #8
-       ldr     data2, [src2], #8
-       eor     diff, data1, data2  /* Non-zero if differences found.  */
-       subs    limit_wd, limit_wd, #1
-       csinv   endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
-       cbz     endloop, .Lloopcmp_proc
-.Lunequal_proc:
-       cbz     diff, .Lremain8
-
-/* There is difference occurred in the latest comparison. */
-.Lnot_limit:
-/*
-* For little endian,reverse the low significant equal bits into MSB,then
-* following CLZ can find how many equal bits exist.
-*/
-CPU_LE( rev    diff, diff )
-CPU_LE( rev    data1, data1 )
-CPU_LE( rev    data2, data2 )
-
-       /*
-       * The MS-non-zero bit of DIFF marks either the first bit
-       * that is different, or the end of the significant data.
-       * Shifting left now will bring the critical information into the
-       * top bits.
-       */
-       clz     pos, diff
-       lsl     data1, data1, pos
-       lsl     data2, data2, pos
-       /*
-       * We need to zero-extend (char is unsigned) the value and then
-       * perform a signed subtraction.
-       */
-       lsr     data1, data1, #56
-       sub     result, data1, data2, lsr #56
+       .p2align 4
+       /* Compare up to 8 bytes.  Limit is [-8..-1].  */
+L(less8):
+       adds    limit, limit, 4
+       b.lo    L(less4)
+       ldr     data1w, [src1], 4
+       ldr     data2w, [src2], 4
+       cmp     data1w, data2w
+       b.ne    L(return)
+       sub     limit, limit, 4
+L(less4):
+       adds    limit, limit, 4
+       beq     L(ret_eq)
+L(byte_loop):
+       ldrb    data1w, [src1], 1
+       ldrb    data2w, [src2], 1
+       subs    limit, limit, 1
+       ccmp    data1w, data2w, 0, ne   /* NZCV = 0b0000.  */
+       b.eq    L(byte_loop)
+       sub     result, data1w, data2w
        ret
 
-.Lremain8:
-       /* Limit % 8 == 0 =>. all data are equal.*/
-       ands    limit, limit, #7
-       b.eq    .Lret0
-
-.Ltiny8proc:
-       ldrb    data1w, [src1], #1
-       ldrb    data2w, [src2], #1
-       subs    limit, limit, #1
-
-       ccmp    data1w, data2w, #0, ne  /* NZCV = 0b0000. */
-       b.eq    .Ltiny8proc
-       sub     result, data1, data2
-       ret
-.Lret0:
-       mov     result, #0
-       ret
 SYM_FUNC_END_PI(memcmp)
 EXPORT_SYMBOL_NOKASAN(memcmp)
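
Note: the Cortex Strings memcmp compares 8 or 16 bytes per step with unaligned loads and derives the result from the first pair of words that differ: on little-endian the words are byte-reversed so an unsigned compare orders them by their first differing byte, and cset/cneg turn that into -1/0/1. A C model of L(return):

	static int memcmp_word_result(unsigned long long data1, unsigned long long data2)
	{
		data1 = __builtin_bswap64(data1);	/* rev */
		data2 = __builtin_bswap64(data2);
		if (data1 == data2)
			return 0;
		return data1 < data2 ? -1 : 1;		/* cset/cneg */
	}
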
index dc8d2a2..b82fd64 100644
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2012-2021, Arm Limited.
  *
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/afd6244a1f8d9229/string/aarch64/memcpy.S
  */
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
-#include <asm/cache.h>
 
-/*
- * Copy a buffer from src to dest (alignment handled by the hardware)
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64, unaligned accesses.
  *
- * Parameters:
- *     x0 - dest
- *     x1 - src
- *     x2 - n
- * Returns:
- *     x0 - dest
  */
-       .macro ldrb1 reg, ptr, val
-       ldrb  \reg, [\ptr], \val
-       .endm
-
-       .macro strb1 reg, ptr, val
-       strb \reg, [\ptr], \val
-       .endm
 
-       .macro ldrh1 reg, ptr, val
-       ldrh  \reg, [\ptr], \val
-       .endm
+#define L(label) .L ## label
 
-       .macro strh1 reg, ptr, val
-       strh \reg, [\ptr], \val
-       .endm
+#define dstin  x0
+#define src    x1
+#define count  x2
+#define dst    x3
+#define srcend x4
+#define dstend x5
+#define A_l    x6
+#define A_lw   w6
+#define A_h    x7
+#define B_l    x8
+#define B_lw   w8
+#define B_h    x9
+#define C_l    x10
+#define C_lw   w10
+#define C_h    x11
+#define D_l    x12
+#define D_h    x13
+#define E_l    x14
+#define E_h    x15
+#define F_l    x16
+#define F_h    x17
+#define G_l    count
+#define G_h    dst
+#define H_l    src
+#define H_h    srcend
+#define tmp1   x14
 
-       .macro ldr1 reg, ptr, val
-       ldr \reg, [\ptr], \val
-       .endm
+/* This implementation handles overlaps and supports both memcpy and memmove
+   from a single entry point.  It uses unaligned accesses and branchless
+   sequences to keep the code small, simple and improve performance.
 
-       .macro str1 reg, ptr, val
-       str \reg, [\ptr], \val
-       .endm
+   Copies are split into 3 main cases: small copies of up to 32 bytes, medium
+   copies of up to 128 bytes, and large copies.  The overhead of the overlap
+   check is negligible since it is only required for large copies.
 
-       .macro ldp1 reg1, reg2, ptr, val
-       ldp \reg1, \reg2, [\ptr], \val
-       .endm
-
-       .macro stp1 reg1, reg2, ptr, val
-       stp \reg1, \reg2, [\ptr], \val
-       .endm
+   Large copies use a software pipelined loop processing 64 bytes per iteration.
+   The destination pointer is 16-byte aligned to minimize unaligned accesses.
+   The loop tail is handled by always copying 64 bytes from the end.
+*/
 
+SYM_FUNC_START_ALIAS(__memmove)
+SYM_FUNC_START_WEAK_ALIAS_PI(memmove)
 SYM_FUNC_START_ALIAS(__memcpy)
 SYM_FUNC_START_WEAK_PI(memcpy)
-#include "copy_template.S"
+       add     srcend, src, count
+       add     dstend, dstin, count
+       cmp     count, 128
+       b.hi    L(copy_long)
+       cmp     count, 32
+       b.hi    L(copy32_128)
+
+       /* Small copies: 0..32 bytes.  */
+       cmp     count, 16
+       b.lo    L(copy16)
+       ldp     A_l, A_h, [src]
+       ldp     D_l, D_h, [srcend, -16]
+       stp     A_l, A_h, [dstin]
+       stp     D_l, D_h, [dstend, -16]
+       ret
+
+       /* Copy 8-15 bytes.  */
+L(copy16):
+       tbz     count, 3, L(copy8)
+       ldr     A_l, [src]
+       ldr     A_h, [srcend, -8]
+       str     A_l, [dstin]
+       str     A_h, [dstend, -8]
+       ret
+
+       .p2align 3
+       /* Copy 4-7 bytes.  */
+L(copy8):
+       tbz     count, 2, L(copy4)
+       ldr     A_lw, [src]
+       ldr     B_lw, [srcend, -4]
+       str     A_lw, [dstin]
+       str     B_lw, [dstend, -4]
+       ret
+
+       /* Copy 0..3 bytes using a branchless sequence.  */
+L(copy4):
+       cbz     count, L(copy0)
+       lsr     tmp1, count, 1
+       ldrb    A_lw, [src]
+       ldrb    C_lw, [srcend, -1]
+       ldrb    B_lw, [src, tmp1]
+       strb    A_lw, [dstin]
+       strb    B_lw, [dstin, tmp1]
+       strb    C_lw, [dstend, -1]
+L(copy0):
+       ret
+
+       .p2align 4
+       /* Medium copies: 33..128 bytes.  */
+L(copy32_128):
+       ldp     A_l, A_h, [src]
+       ldp     B_l, B_h, [src, 16]
+       ldp     C_l, C_h, [srcend, -32]
+       ldp     D_l, D_h, [srcend, -16]
+       cmp     count, 64
+       b.hi    L(copy128)
+       stp     A_l, A_h, [dstin]
+       stp     B_l, B_h, [dstin, 16]
+       stp     C_l, C_h, [dstend, -32]
+       stp     D_l, D_h, [dstend, -16]
        ret
+
+       .p2align 4
+       /* Copy 65..128 bytes.  */
+L(copy128):
+       ldp     E_l, E_h, [src, 32]
+       ldp     F_l, F_h, [src, 48]
+       cmp     count, 96
+       b.ls    L(copy96)
+       ldp     G_l, G_h, [srcend, -64]
+       ldp     H_l, H_h, [srcend, -48]
+       stp     G_l, G_h, [dstend, -64]
+       stp     H_l, H_h, [dstend, -48]
+L(copy96):
+       stp     A_l, A_h, [dstin]
+       stp     B_l, B_h, [dstin, 16]
+       stp     E_l, E_h, [dstin, 32]
+       stp     F_l, F_h, [dstin, 48]
+       stp     C_l, C_h, [dstend, -32]
+       stp     D_l, D_h, [dstend, -16]
+       ret
+
+       .p2align 4
+       /* Copy more than 128 bytes.  */
+L(copy_long):
+       /* Use backwards copy if there is an overlap.  */
+       sub     tmp1, dstin, src
+       cbz     tmp1, L(copy0)
+       cmp     tmp1, count
+       b.lo    L(copy_long_backwards)
+
+       /* Copy 16 bytes and then align dst to 16-byte alignment.  */
+
+       ldp     D_l, D_h, [src]
+       and     tmp1, dstin, 15
+       bic     dst, dstin, 15
+       sub     src, src, tmp1
+       add     count, count, tmp1      /* Count is now 16 too large.  */
+       ldp     A_l, A_h, [src, 16]
+       stp     D_l, D_h, [dstin]
+       ldp     B_l, B_h, [src, 32]
+       ldp     C_l, C_h, [src, 48]
+       ldp     D_l, D_h, [src, 64]!
+       subs    count, count, 128 + 16  /* Test and readjust count.  */
+       b.ls    L(copy64_from_end)
+
+L(loop64):
+       stp     A_l, A_h, [dst, 16]
+       ldp     A_l, A_h, [src, 16]
+       stp     B_l, B_h, [dst, 32]
+       ldp     B_l, B_h, [src, 32]
+       stp     C_l, C_h, [dst, 48]
+       ldp     C_l, C_h, [src, 48]
+       stp     D_l, D_h, [dst, 64]!
+       ldp     D_l, D_h, [src, 64]!
+       subs    count, count, 64
+       b.hi    L(loop64)
+
+       /* Write the last iteration and copy 64 bytes from the end.  */
+L(copy64_from_end):
+       ldp     E_l, E_h, [srcend, -64]
+       stp     A_l, A_h, [dst, 16]
+       ldp     A_l, A_h, [srcend, -48]
+       stp     B_l, B_h, [dst, 32]
+       ldp     B_l, B_h, [srcend, -32]
+       stp     C_l, C_h, [dst, 48]
+       ldp     C_l, C_h, [srcend, -16]
+       stp     D_l, D_h, [dst, 64]
+       stp     E_l, E_h, [dstend, -64]
+       stp     A_l, A_h, [dstend, -48]
+       stp     B_l, B_h, [dstend, -32]
+       stp     C_l, C_h, [dstend, -16]
+       ret
+
+       .p2align 4
+
+       /* Large backwards copy for overlapping copies.
+          Copy 16 bytes and then align dst to 16-byte alignment.  */
+L(copy_long_backwards):
+       ldp     D_l, D_h, [srcend, -16]
+       and     tmp1, dstend, 15
+       sub     srcend, srcend, tmp1
+       sub     count, count, tmp1
+       ldp     A_l, A_h, [srcend, -16]
+       stp     D_l, D_h, [dstend, -16]
+       ldp     B_l, B_h, [srcend, -32]
+       ldp     C_l, C_h, [srcend, -48]
+       ldp     D_l, D_h, [srcend, -64]!
+       sub     dstend, dstend, tmp1
+       subs    count, count, 128
+       b.ls    L(copy64_from_start)
+
+L(loop64_backwards):
+       stp     A_l, A_h, [dstend, -16]
+       ldp     A_l, A_h, [srcend, -16]
+       stp     B_l, B_h, [dstend, -32]
+       ldp     B_l, B_h, [srcend, -32]
+       stp     C_l, C_h, [dstend, -48]
+       ldp     C_l, C_h, [srcend, -48]
+       stp     D_l, D_h, [dstend, -64]!
+       ldp     D_l, D_h, [srcend, -64]!
+       subs    count, count, 64
+       b.hi    L(loop64_backwards)
+
+       /* Write the last iteration and copy 64 bytes from the start.  */
+L(copy64_from_start):
+       ldp     G_l, G_h, [src, 48]
+       stp     A_l, A_h, [dstend, -16]
+       ldp     A_l, A_h, [src, 32]
+       stp     B_l, B_h, [dstend, -32]
+       ldp     B_l, B_h, [src, 16]
+       stp     C_l, C_h, [dstend, -48]
+       ldp     C_l, C_h, [src]
+       stp     D_l, D_h, [dstend, -64]
+       stp     G_l, G_h, [dstin, 48]
+       stp     A_l, A_h, [dstin, 32]
+       stp     B_l, B_h, [dstin, 16]
+       stp     C_l, C_h, [dstin]
+       ret
+
 SYM_FUNC_END_PI(memcpy)
 EXPORT_SYMBOL(memcpy)
 SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(__memcpy)
+SYM_FUNC_END_ALIAS_PI(memmove)
+EXPORT_SYMBOL(memmove)
+SYM_FUNC_END_ALIAS(__memmove)
+EXPORT_SYMBOL(__memmove)
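
Note: memcpy and memmove now share this single implementation; the only overlap handling is the direction check at the top of L(copy_long), which costs one subtract and one compare. A C model of that check (illustrative only):

	static bool must_copy_backwards(const char *dst, const char *src,
					unsigned long count)
	{
		/*
		 * Unsigned (dst - src) is below count only when dst lies inside
		 * [src, src + count), i.e. a forward copy would overwrite bytes
		 * it has not read yet.
		 */
		return dst != src && (unsigned long)(dst - src) < count;
	}
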
diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S
deleted file mode 100644
index 1035dce..0000000
+++ /dev/null
@@ -1,189 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
- *
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/cache.h>
-
-/*
- * Move a buffer from src to test (alignment handled by the hardware).
- * If dest <= src, call memcpy, otherwise copy in reverse order.
- *
- * Parameters:
- *     x0 - dest
- *     x1 - src
- *     x2 - n
- * Returns:
- *     x0 - dest
- */
-dstin  .req    x0
-src    .req    x1
-count  .req    x2
-tmp1   .req    x3
-tmp1w  .req    w3
-tmp2   .req    x4
-tmp2w  .req    w4
-tmp3   .req    x5
-tmp3w  .req    w5
-dst    .req    x6
-
-A_l    .req    x7
-A_h    .req    x8
-B_l    .req    x9
-B_h    .req    x10
-C_l    .req    x11
-C_h    .req    x12
-D_l    .req    x13
-D_h    .req    x14
-
-SYM_FUNC_START_ALIAS(__memmove)
-SYM_FUNC_START_WEAK_PI(memmove)
-       cmp     dstin, src
-       b.lo    __memcpy
-       add     tmp1, src, count
-       cmp     dstin, tmp1
-       b.hs    __memcpy                /* No overlap.  */
-
-       add     dst, dstin, count
-       add     src, src, count
-       cmp     count, #16
-       b.lo    .Ltail15  /*probably non-alignment accesses.*/
-
-       ands    tmp2, src, #15     /* Bytes to reach alignment.  */
-       b.eq    .LSrcAligned
-       sub     count, count, tmp2
-       /*
-       * process the aligned offset length to make the src aligned firstly.
-       * those extra instructions' cost is acceptable. It also make the
-       * coming accesses are based on aligned address.
-       */
-       tbz     tmp2, #0, 1f
-       ldrb    tmp1w, [src, #-1]!
-       strb    tmp1w, [dst, #-1]!
-1:
-       tbz     tmp2, #1, 2f
-       ldrh    tmp1w, [src, #-2]!
-       strh    tmp1w, [dst, #-2]!
-2:
-       tbz     tmp2, #2, 3f
-       ldr     tmp1w, [src, #-4]!
-       str     tmp1w, [dst, #-4]!
-3:
-       tbz     tmp2, #3, .LSrcAligned
-       ldr     tmp1, [src, #-8]!
-       str     tmp1, [dst, #-8]!
-
-.LSrcAligned:
-       cmp     count, #64
-       b.ge    .Lcpy_over64
-
-       /*
-       * Deal with small copies quickly by dropping straight into the
-       * exit block.
-       */
-.Ltail63:
-       /*
-       * Copy up to 48 bytes of data. At this point we only need the
-       * bottom 6 bits of count to be accurate.
-       */
-       ands    tmp1, count, #0x30
-       b.eq    .Ltail15
-       cmp     tmp1w, #0x20
-       b.eq    1f
-       b.lt    2f
-       ldp     A_l, A_h, [src, #-16]!
-       stp     A_l, A_h, [dst, #-16]!
-1:
-       ldp     A_l, A_h, [src, #-16]!
-       stp     A_l, A_h, [dst, #-16]!
-2:
-       ldp     A_l, A_h, [src, #-16]!
-       stp     A_l, A_h, [dst, #-16]!
-
-.Ltail15:
-       tbz     count, #3, 1f
-       ldr     tmp1, [src, #-8]!
-       str     tmp1, [dst, #-8]!
-1:
-       tbz     count, #2, 2f
-       ldr     tmp1w, [src, #-4]!
-       str     tmp1w, [dst, #-4]!
-2:
-       tbz     count, #1, 3f
-       ldrh    tmp1w, [src, #-2]!
-       strh    tmp1w, [dst, #-2]!
-3:
-       tbz     count, #0, .Lexitfunc
-       ldrb    tmp1w, [src, #-1]
-       strb    tmp1w, [dst, #-1]
-
-.Lexitfunc:
-       ret
-
-.Lcpy_over64:
-       subs    count, count, #128
-       b.ge    .Lcpy_body_large
-       /*
-       * Less than 128 bytes to copy, so handle 64 bytes here and then jump
-       * to the tail.
-       */
-       ldp     A_l, A_h, [src, #-16]
-       stp     A_l, A_h, [dst, #-16]
-       ldp     B_l, B_h, [src, #-32]
-       ldp     C_l, C_h, [src, #-48]
-       stp     B_l, B_h, [dst, #-32]
-       stp     C_l, C_h, [dst, #-48]
-       ldp     D_l, D_h, [src, #-64]!
-       stp     D_l, D_h, [dst, #-64]!
-
-       tst     count, #0x3f
-       b.ne    .Ltail63
-       ret
-
-       /*
-       * Critical loop. Start at a new cache line boundary. Assuming
-       * 64 bytes per line this ensures the entire loop is in one line.
-       */
-       .p2align        L1_CACHE_SHIFT
-.Lcpy_body_large:
-       /* pre-load 64 bytes data. */
-       ldp     A_l, A_h, [src, #-16]
-       ldp     B_l, B_h, [src, #-32]
-       ldp     C_l, C_h, [src, #-48]
-       ldp     D_l, D_h, [src, #-64]!
-1:
-       /*
-       * interlace the load of next 64 bytes data block with store of the last
-       * loaded 64 bytes data.
-       */
-       stp     A_l, A_h, [dst, #-16]
-       ldp     A_l, A_h, [src, #-16]
-       stp     B_l, B_h, [dst, #-32]
-       ldp     B_l, B_h, [src, #-32]
-       stp     C_l, C_h, [dst, #-48]
-       ldp     C_l, C_h, [src, #-48]
-       stp     D_l, D_h, [dst, #-64]!
-       ldp     D_l, D_h, [src, #-64]!
-       subs    count, count, #64
-       b.ge    1b
-       stp     A_l, A_h, [dst, #-16]
-       stp     B_l, B_h, [dst, #-32]
-       stp     C_l, C_h, [dst, #-48]
-       stp     D_l, D_h, [dst, #-64]!
-
-       tst     count, #0x3f
-       b.ne    .Ltail63
-       ret
-SYM_FUNC_END_PI(memmove)
-EXPORT_SYMBOL(memmove)
-SYM_FUNC_END_ALIAS(__memmove)
-EXPORT_SYMBOL(__memmove)
index 351537c..e83643b 100644
@@ -37,6 +37,26 @@ SYM_FUNC_START(mte_clear_page_tags)
 SYM_FUNC_END(mte_clear_page_tags)
 
 /*
+ * Zero the page and tags at the same time
+ *
+ * Parameters:
+ *     x0 - address to the beginning of the page
+ */
+SYM_FUNC_START(mte_zero_clear_page_tags)
+       mrs     x1, dczid_el0
+       and     w1, w1, #0xf
+       mov     x2, #4
+       lsl     x1, x2, x1
+       and     x0, x0, #(1 << MTE_TAG_SHIFT) - 1       // clear the tag
+
+1:     dc      gzva, x0
+       add     x0, x0, x1
+       tst     x0, #(PAGE_SIZE - 1)
+       b.ne    1b
+       ret
+SYM_FUNC_END(mte_zero_clear_page_tags)
+
+/*
  * Copy the tags from the source page to the destination one
  *   x0 - address of the destination page
  *   x1 - address of the source page
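
Note: mte_zero_clear_page_tags() uses DC GZVA so each block's data and allocation tags are zeroed in one operation; the stride comes from DCZID_EL0, whose BS field is the log2 block size in 4-byte words (the mrs/and/lsl sequence above). Illustrative helper, taking the register value as a parameter:

	static inline unsigned long dc_zva_block_bytes(unsigned long dczid_el0)
	{
		return 4UL << (dczid_el0 & 0xf);	/* BS = log2(words) */
	}
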
index 4e79566..d7bee21 100644
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2012-2021, Arm Limited.
  *
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/afd6244a1f8d9229/string/aarch64/strcmp.S
  */
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
-/*
- * compare two strings
+/* Assumptions:
  *
- * Parameters:
- *     x0 - const string 1 pointer
- *    x1 - const string 2 pointer
- * Returns:
- * x0 - an integer less than, equal to, or greater than zero
- * if  s1  is  found, respectively, to be less than, to match,
- * or be greater than s2.
+ * ARMv8-a, AArch64
  */
 
+#define L(label) .L ## label
+
 #define REP8_01 0x0101010101010101
 #define REP8_7f 0x7f7f7f7f7f7f7f7f
 #define REP8_80 0x8080808080808080
 
 /* Parameters and result.  */
-src1           .req    x0
-src2           .req    x1
-result         .req    x0
+#define src1           x0
+#define src2           x1
+#define result         x0
 
 /* Internal variables.  */
-data1          .req    x2
-data1w         .req    w2
-data2          .req    x3
-data2w         .req    w3
-has_nul                .req    x4
-diff           .req    x5
-syndrome       .req    x6
-tmp1           .req    x7
-tmp2           .req    x8
-tmp3           .req    x9
-zeroones       .req    x10
-pos            .req    x11
-
+#define data1          x2
+#define data1w         w2
+#define data2          x3
+#define data2w         w3
+#define has_nul                x4
+#define diff           x5
+#define syndrome       x6
+#define tmp1           x7
+#define tmp2           x8
+#define tmp3           x9
+#define zeroones       x10
+#define pos            x11
+
+       /* Start of performance-critical section  -- one 64B cache line.  */
+       .align 6
 SYM_FUNC_START_WEAK_PI(strcmp)
        eor     tmp1, src1, src2
        mov     zeroones, #REP8_01
        tst     tmp1, #7
-       b.ne    .Lmisaligned8
+       b.ne    L(misaligned8)
        ands    tmp1, src1, #7
-       b.ne    .Lmutual_align
-
-       /*
-       * NUL detection works on the principle that (X - 1) & (~X) & 0x80
-       * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
-       * can be done in parallel across the entire word.
-       */
-.Lloop_aligned:
+       b.ne    L(mutual_align)
+       /* NUL detection works on the principle that (X - 1) & (~X) & 0x80
+          (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+          can be done in parallel across the entire word.  */
+L(loop_aligned):
        ldr     data1, [src1], #8
        ldr     data2, [src2], #8
-.Lstart_realigned:
+L(start_realigned):
        sub     tmp1, data1, zeroones
        orr     tmp2, data1, #REP8_7f
        eor     diff, data1, data2      /* Non-zero if differences found.  */
        bic     has_nul, tmp1, tmp2     /* Non-zero if NUL terminator.  */
        orr     syndrome, diff, has_nul
-       cbz     syndrome, .Lloop_aligned
-       b       .Lcal_cmpresult
+       cbz     syndrome, L(loop_aligned)
+       /* End of performance-critical section  -- one 64B cache line.  */
+
+L(end):
+#ifndef        __AARCH64EB__
+       rev     syndrome, syndrome
+       rev     data1, data1
+       /* The MS-non-zero bit of the syndrome marks either the first bit
+          that is different, or the top bit of the first zero byte.
+          Shifting left now will bring the critical information into the
+          top bits.  */
+       clz     pos, syndrome
+       rev     data2, data2
+       lsl     data1, data1, pos
+       lsl     data2, data2, pos
+       /* But we need to zero-extend (char is unsigned) the value and then
+          perform a signed 32-bit subtraction.  */
+       lsr     data1, data1, #56
+       sub     result, data1, data2, lsr #56
+       ret
+#else
+       /* For big-endian we cannot use the trick with the syndrome value
+          as carry-propagation can corrupt the upper bits if the trailing
+          bytes in the string contain 0x01.  */
+       /* However, if there is no NUL byte in the dword, we can generate
+          the result directly.  We can't just subtract the bytes as the
+          MSB might be significant.  */
+       cbnz    has_nul, 1f
+       cmp     data1, data2
+       cset    result, ne
+       cneg    result, result, lo
+       ret
+1:
+       /* Re-compute the NUL-byte detection, using a byte-reversed value.  */
+       rev     tmp3, data1
+       sub     tmp1, tmp3, zeroones
+       orr     tmp2, tmp3, #REP8_7f
+       bic     has_nul, tmp1, tmp2
+       rev     has_nul, has_nul
+       orr     syndrome, diff, has_nul
+       clz     pos, syndrome
+       /* The MS-non-zero bit of the syndrome marks either the first bit
+          that is different, or the top bit of the first zero byte.
+          Shifting left now will bring the critical information into the
+          top bits.  */
+       lsl     data1, data1, pos
+       lsl     data2, data2, pos
+       /* But we need to zero-extend (char is unsigned) the value and then
+          perform a signed 32-bit subtraction.  */
+       lsr     data1, data1, #56
+       sub     result, data1, data2, lsr #56
+       ret
+#endif
 
-.Lmutual_align:
-       /*
-       * Sources are mutually aligned, but are not currently at an
-       * alignment boundary.  Round down the addresses and then mask off
-       * the bytes that preceed the start point.
-       */
+L(mutual_align):
+       /* Sources are mutually aligned, but are not currently at an
+          alignment boundary.  Round down the addresses and then mask off
+          the bytes that preceed the start point.  */
        bic     src1, src1, #7
        bic     src2, src2, #7
        lsl     tmp1, tmp1, #3          /* Bytes beyond alignment -> bits.  */
@@ -86,138 +125,52 @@ SYM_FUNC_START_WEAK_PI(strcmp)
        neg     tmp1, tmp1              /* Bits to alignment -64.  */
        ldr     data2, [src2], #8
        mov     tmp2, #~0
+#ifdef __AARCH64EB__
        /* Big-endian.  Early bytes are at MSB.  */
-CPU_BE( lsl    tmp2, tmp2, tmp1 )      /* Shift (tmp1 & 63).  */
+       lsl     tmp2, tmp2, tmp1        /* Shift (tmp1 & 63).  */
+#else
        /* Little-endian.  Early bytes are at LSB.  */
-CPU_LE( lsr    tmp2, tmp2, tmp1 )      /* Shift (tmp1 & 63).  */
-
+       lsr     tmp2, tmp2, tmp1        /* Shift (tmp1 & 63).  */
+#endif
        orr     data1, data1, tmp2
        orr     data2, data2, tmp2
-       b       .Lstart_realigned
-
-.Lmisaligned8:
-       /*
-       * Get the align offset length to compare per byte first.
-       * After this process, one string's address will be aligned.
-       */
-       and     tmp1, src1, #7
-       neg     tmp1, tmp1
-       add     tmp1, tmp1, #8
-       and     tmp2, src2, #7
-       neg     tmp2, tmp2
-       add     tmp2, tmp2, #8
-       subs    tmp3, tmp1, tmp2
-       csel    pos, tmp1, tmp2, hi /*Choose the maximum. */
-.Ltinycmp:
+       b       L(start_realigned)
+
+L(misaligned8):
+       /* Align SRC1 to 8 bytes and then compare 8 bytes at a time, always
+          checking to make sure that we don't access beyond page boundary in
+          SRC2.  */
+       tst     src1, #7
+       b.eq    L(loop_misaligned)
+L(do_misaligned):
        ldrb    data1w, [src1], #1
        ldrb    data2w, [src2], #1
-       subs    pos, pos, #1
-       ccmp    data1w, #1, #0, ne  /* NZCV = 0b0000.  */
-       ccmp    data1w, data2w, #0, cs  /* NZCV = 0b0000.  */
-       b.eq    .Ltinycmp
-       cbnz    pos, 1f /*find the null or unequal...*/
        cmp     data1w, #1
-       ccmp    data1w, data2w, #0, cs
-       b.eq    .Lstart_align /*the last bytes are equal....*/
-1:
-       sub     result, data1, data2
-       ret
-
-.Lstart_align:
-       ands    xzr, src1, #7
-       b.eq    .Lrecal_offset
-       /*process more leading bytes to make str1 aligned...*/
-       add     src1, src1, tmp3
-       add     src2, src2, tmp3
-       /*load 8 bytes from aligned str1 and non-aligned str2..*/
+       ccmp    data1w, data2w, #0, cs  /* NZCV = 0b0000.  */
+       b.ne    L(done)
+       tst     src1, #7
+       b.ne    L(do_misaligned)
+
+L(loop_misaligned):
+       /* Test if we are within the last dword of the end of a 4K page.  If
+          yes then jump back to the misaligned loop to copy a byte at a time.  */
+       and     tmp1, src2, #0xff8
+       eor     tmp1, tmp1, #0xff8
+       cbz     tmp1, L(do_misaligned)
        ldr     data1, [src1], #8
        ldr     data2, [src2], #8
 
        sub     tmp1, data1, zeroones
        orr     tmp2, data1, #REP8_7f
-       bic     has_nul, tmp1, tmp2
-       eor     diff, data1, data2 /* Non-zero if differences found.  */
-       orr     syndrome, diff, has_nul
-       cbnz    syndrome, .Lcal_cmpresult
-       /*How far is the current str2 from the alignment boundary...*/
-       and     tmp3, tmp3, #7
-.Lrecal_offset:
-       neg     pos, tmp3
-.Lloopcmp_proc:
-       /*
-       * Divide the eight bytes into two parts. First,backwards the src2
-       * to an alignment boundary,load eight bytes from the SRC2 alignment
-       * boundary,then compare with the relative bytes from SRC1.
-       * If all 8 bytes are equal,then start the second part's comparison.
-       * Otherwise finish the comparison.
-       * This special handle can garantee all the accesses are in the
-       * thread/task space in avoid to overrange access.
-       */
-       ldr     data1, [src1,pos]
-       ldr     data2, [src2,pos]
-       sub     tmp1, data1, zeroones
-       orr     tmp2, data1, #REP8_7f
-       bic     has_nul, tmp1, tmp2
-       eor     diff, data1, data2  /* Non-zero if differences found.  */
-       orr     syndrome, diff, has_nul
-       cbnz    syndrome, .Lcal_cmpresult
-
-       /*The second part process*/
-       ldr     data1, [src1], #8
-       ldr     data2, [src2], #8
-       sub     tmp1, data1, zeroones
-       orr     tmp2, data1, #REP8_7f
-       bic     has_nul, tmp1, tmp2
-       eor     diff, data1, data2  /* Non-zero if differences found.  */
+       eor     diff, data1, data2      /* Non-zero if differences found.  */
+       bic     has_nul, tmp1, tmp2     /* Non-zero if NUL terminator.  */
        orr     syndrome, diff, has_nul
-       cbz     syndrome, .Lloopcmp_proc
+       cbz     syndrome, L(loop_misaligned)
+       b       L(end)
 
-.Lcal_cmpresult:
-       /*
-       * reversed the byte-order as big-endian,then CLZ can find the most
-       * significant zero bits.
-       */
-CPU_LE( rev    syndrome, syndrome )
-CPU_LE( rev    data1, data1 )
-CPU_LE( rev    data2, data2 )
-
-       /*
-       * For big-endian we cannot use the trick with the syndrome value
-       * as carry-propagation can corrupt the upper bits if the trailing
-       * bytes in the string contain 0x01.
-       * However, if there is no NUL byte in the dword, we can generate
-       * the result directly.  We cannot just subtract the bytes as the
-       * MSB might be significant.
-       */
-CPU_BE( cbnz   has_nul, 1f )
-CPU_BE( cmp    data1, data2 )
-CPU_BE( cset   result, ne )
-CPU_BE( cneg   result, result, lo )
-CPU_BE( ret )
-CPU_BE( 1: )
-       /*Re-compute the NUL-byte detection, using a byte-reversed value. */
-CPU_BE(        rev     tmp3, data1 )
-CPU_BE(        sub     tmp1, tmp3, zeroones )
-CPU_BE(        orr     tmp2, tmp3, #REP8_7f )
-CPU_BE(        bic     has_nul, tmp1, tmp2 )
-CPU_BE(        rev     has_nul, has_nul )
-CPU_BE(        orr     syndrome, diff, has_nul )
-
-       clz     pos, syndrome
-       /*
-       * The MS-non-zero bit of the syndrome marks either the first bit
-       * that is different, or the top bit of the first zero byte.
-       * Shifting left now will bring the critical information into the
-       * top bits.
-       */
-       lsl     data1, data1, pos
-       lsl     data2, data2, pos
-       /*
-       * But we need to zero-extend (char is unsigned) the value and then
-       * perform a signed 32-bit subtraction.
-       */
-       lsr     data1, data1, #56
-       sub     result, data1, data2, lsr #56
+L(done):
+       sub     result, data1, data2
        ret
+
 SYM_FUNC_END_PI(strcmp)
 EXPORT_SYMBOL_NOKASAN(strcmp)
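
For readers unfamiliar with the word-at-a-time trick cited throughout the strcmp/strlen/strncmp comments, the following is a minimal standalone C sketch (userspace code, not part of the kernel; the REP8_* names simply mirror the constants above). It demonstrates that (X - 0x01..01) & ~X & 0x80..80 is non-zero exactly when some byte of X is zero, which is what lets the loop test a whole dword per iteration.

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  #define REP8_01 0x0101010101010101ULL
  #define REP8_80 0x8080808080808080ULL

  /* Non-zero iff some byte of x is zero: the NUL check used per dword above. */
  static int word_has_zero_byte(uint64_t x)
  {
  	return ((x - REP8_01) & ~x & REP8_80) != 0;
  }

  int main(void)
  {
  	uint64_t no_nul, with_nul;

  	memcpy(&no_nul, "abcdefgh", 8);    /* no zero byte */
  	memcpy(&with_nul, "abc\0defg", 8); /* zero byte at offset 3 */
  	printf("%d %d\n", word_has_zero_byte(no_nul),
  	       word_has_zero_byte(with_nul)); /* prints "0 1" */
  	return 0;
  }
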
index ee3ed88..35fbdb7 100644 (file)
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2013-2021, Arm Limited.
  *
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/98e4d6a5c13c8e54/string/aarch64/strlen.S
  */
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
-/*
- * calculate the length of a string
+/* Assumptions:
  *
- * Parameters:
- *     x0 - const string pointer
- * Returns:
- *     x0 - the return length of specific string
+ * ARMv8-a, AArch64, unaligned accesses, min page size 4k.
  */
 
+#define L(label) .L ## label
+
 /* Arguments and results.  */
-srcin          .req    x0
-len            .req    x0
+#define srcin          x0
+#define len            x0
 
 /* Locals and temporaries.  */
-src            .req    x1
-data1          .req    x2
-data2          .req    x3
-data2a         .req    x4
-has_nul1       .req    x5
-has_nul2       .req    x6
-tmp1           .req    x7
-tmp2           .req    x8
-tmp3           .req    x9
-tmp4           .req    x10
-zeroones       .req    x11
-pos            .req    x12
+#define src            x1
+#define data1          x2
+#define data2          x3
+#define has_nul1       x4
+#define has_nul2       x5
+#define tmp1           x4
+#define tmp2           x5
+#define tmp3           x6
+#define tmp4           x7
+#define zeroones       x8
+
+       /* NUL detection works on the principle that (X - 1) & (~X) & 0x80
+          (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+          can be done in parallel across the entire word. A faster check
+          (X - 1) & 0x80 is zero for non-NUL ASCII characters, but gives
+          false hits for characters 129..255.  */
 
 #define REP8_01 0x0101010101010101
 #define REP8_7f 0x7f7f7f7f7f7f7f7f
 #define REP8_80 0x8080808080808080
 
+#define MIN_PAGE_SIZE 4096
+
+       /* Since strings are short on average, we check the first 16 bytes
+          of the string for a NUL character.  In order to do an unaligned ldp
+          safely we have to do a page cross check first.  If there is a NUL
+          byte we calculate the length from the 2 8-byte words using
+          conditional select to reduce branch mispredictions (it is unlikely
+          strlen will be repeatedly called on strings with the same length).
+
+          If the string is longer than 16 bytes, we align src so don't need
+          further page cross checks, and process 32 bytes per iteration
+          using the fast NUL check.  If we encounter non-ASCII characters,
+          fallback to a second loop using the full NUL check.
+
+          If the page cross check fails, we read 16 bytes from an aligned
+          address, remove any characters before the string, and continue
+          in the main loop using aligned loads.  Since strings crossing a
+          page in the first 16 bytes are rare (probability of
+          16/MIN_PAGE_SIZE ~= 0.4%), this case does not need to be optimized.
+
+          AArch64 systems have a minimum page size of 4k.  We don't bother
+          checking for larger page sizes - the cost of setting up the correct
+          page size is just not worth the extra gain from a small reduction in
+          the cases taking the slow path.  Note that we only care about
+          whether the first fetch, which may be misaligned, crosses a page
+          boundary.  */
+
 SYM_FUNC_START_WEAK_PI(strlen)
-       mov     zeroones, #REP8_01
-       bic     src, srcin, #15
-       ands    tmp1, srcin, #15
-       b.ne    .Lmisaligned
-       /*
-       * NUL detection works on the principle that (X - 1) & (~X) & 0x80
-       * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
-       * can be done in parallel across the entire word.
-       */
-       /*
-       * The inner loop deals with two Dwords at a time. This has a
-       * slightly higher start-up cost, but we should win quite quickly,
-       * especially on cores with a high number of issue slots per
-       * cycle, as we get much better parallelism out of the operations.
-       */
-.Lloop:
-       ldp     data1, data2, [src], #16
-.Lrealigned:
+       and     tmp1, srcin, MIN_PAGE_SIZE - 1
+       mov     zeroones, REP8_01
+       cmp     tmp1, MIN_PAGE_SIZE - 16
+       b.gt    L(page_cross)
+       ldp     data1, data2, [srcin]
+#ifdef __AARCH64EB__
+       /* For big-endian, carry propagation (if the final byte in the
+          string is 0x01) means we cannot use has_nul1/2 directly.
+          Since we expect strings to be small and early-exit,
+          byte-swap the data now so has_nul1/2 will be correct.  */
+       rev     data1, data1
+       rev     data2, data2
+#endif
        sub     tmp1, data1, zeroones
-       orr     tmp2, data1, #REP8_7f
+       orr     tmp2, data1, REP8_7f
        sub     tmp3, data2, zeroones
-       orr     tmp4, data2, #REP8_7f
-       bic     has_nul1, tmp1, tmp2
-       bics    has_nul2, tmp3, tmp4
-       ccmp    has_nul1, #0, #0, eq    /* NZCV = 0000  */
-       b.eq    .Lloop
+       orr     tmp4, data2, REP8_7f
+       bics    has_nul1, tmp1, tmp2
+       bic     has_nul2, tmp3, tmp4
+       ccmp    has_nul2, 0, 0, eq
+       beq     L(main_loop_entry)
+
+       /* Enter with C = has_nul1 == 0.  */
+       csel    has_nul1, has_nul1, has_nul2, cc
+       mov     len, 8
+       rev     has_nul1, has_nul1
+       clz     tmp1, has_nul1
+       csel    len, xzr, len, cc
+       add     len, len, tmp1, lsr 3
+       ret
 
+       /* The inner loop processes 32 bytes per iteration and uses the fast
+          NUL check.  If we encounter non-ASCII characters, use a second
+          loop with the accurate NUL check.  */
+       .p2align 4
+L(main_loop_entry):
+       bic     src, srcin, 15
+       sub     src, src, 16
+L(main_loop):
+       ldp     data1, data2, [src, 32]!
+L(page_cross_entry):
+       sub     tmp1, data1, zeroones
+       sub     tmp3, data2, zeroones
+       orr     tmp2, tmp1, tmp3
+       tst     tmp2, zeroones, lsl 7
+       bne     1f
+       ldp     data1, data2, [src, 16]
+       sub     tmp1, data1, zeroones
+       sub     tmp3, data2, zeroones
+       orr     tmp2, tmp1, tmp3
+       tst     tmp2, zeroones, lsl 7
+       beq     L(main_loop)
+       add     src, src, 16
+1:
+       /* The fast check failed, so do the slower, accurate NUL check.  */
+       orr     tmp2, data1, REP8_7f
+       orr     tmp4, data2, REP8_7f
+       bics    has_nul1, tmp1, tmp2
+       bic     has_nul2, tmp3, tmp4
+       ccmp    has_nul2, 0, 0, eq
+       beq     L(nonascii_loop)
+
+       /* Enter with C = has_nul1 == 0.  */
+L(tail):
+#ifdef __AARCH64EB__
+       /* For big-endian, carry propagation (if the final byte in the
+          string is 0x01) means we cannot use has_nul1/2 directly.  The
+          easiest way to get the correct byte is to byte-swap the data
+          and calculate the syndrome a second time.  */
+       csel    data1, data1, data2, cc
+       rev     data1, data1
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, REP8_7f
+       bic     has_nul1, tmp1, tmp2
+#else
+       csel    has_nul1, has_nul1, has_nul2, cc
+#endif
        sub     len, src, srcin
-       cbz     has_nul1, .Lnul_in_data2
-CPU_BE(        mov     data2, data1 )  /*prepare data to re-calculate the syndrome*/
-       sub     len, len, #8
-       mov     has_nul2, has_nul1
-.Lnul_in_data2:
-       /*
-       * For big-endian, carry propagation (if the final byte in the
-       * string is 0x01) means we cannot use has_nul directly.  The
-       * easiest way to get the correct byte is to byte-swap the data
-       * and calculate the syndrome a second time.
-       */
-CPU_BE( rev    data2, data2 )
-CPU_BE( sub    tmp1, data2, zeroones )
-CPU_BE( orr    tmp2, data2, #REP8_7f )
-CPU_BE( bic    has_nul2, tmp1, tmp2 )
-
-       sub     len, len, #8
-       rev     has_nul2, has_nul2
-       clz     pos, has_nul2
-       add     len, len, pos, lsr #3           /* Bits to bytes.  */
+       rev     has_nul1, has_nul1
+       add     tmp2, len, 8
+       clz     tmp1, has_nul1
+       csel    len, len, tmp2, cc
+       add     len, len, tmp1, lsr 3
        ret
 
-.Lmisaligned:
-       cmp     tmp1, #8
-       neg     tmp1, tmp1
-       ldp     data1, data2, [src], #16
-       lsl     tmp1, tmp1, #3          /* Bytes beyond alignment -> bits.  */
-       mov     tmp2, #~0
-       /* Big-endian.  Early bytes are at MSB.  */
-CPU_BE( lsl    tmp2, tmp2, tmp1 )      /* Shift (tmp1 & 63).  */
+L(nonascii_loop):
+       ldp     data1, data2, [src, 16]!
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, REP8_7f
+       sub     tmp3, data2, zeroones
+       orr     tmp4, data2, REP8_7f
+       bics    has_nul1, tmp1, tmp2
+       bic     has_nul2, tmp3, tmp4
+       ccmp    has_nul2, 0, 0, eq
+       bne     L(tail)
+       ldp     data1, data2, [src, 16]!
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, REP8_7f
+       sub     tmp3, data2, zeroones
+       orr     tmp4, data2, REP8_7f
+       bics    has_nul1, tmp1, tmp2
+       bic     has_nul2, tmp3, tmp4
+       ccmp    has_nul2, 0, 0, eq
+       beq     L(nonascii_loop)
+       b       L(tail)
+
+       /* Load 16 bytes from [srcin & ~15] and force the bytes that precede
+          srcin to 0x7f, so we ignore any NUL bytes before the string.
+          Then continue in the aligned loop.  */
+L(page_cross):
+       bic     src, srcin, 15
+       ldp     data1, data2, [src]
+       lsl     tmp1, srcin, 3
+       mov     tmp4, -1
+#ifdef __AARCH64EB__
+       /* Big-endian.  Early bytes are at MSB.  */
+       lsr     tmp1, tmp4, tmp1        /* Shift (tmp1 & 63).  */
+#else
        /* Little-endian.  Early bytes are at LSB.  */
-CPU_LE( lsr    tmp2, tmp2, tmp1 )      /* Shift (tmp1 & 63).  */
+       lsl     tmp1, tmp4, tmp1        /* Shift (tmp1 & 63).  */
+#endif
+       orr     tmp1, tmp1, REP8_80
+       orn     data1, data1, tmp1
+       orn     tmp2, data2, tmp1
+       tst     srcin, 8
+       csel    data1, data1, tmp4, eq
+       csel    data2, data2, tmp2, eq
+       b       L(page_cross_entry)
 
-       orr     data1, data1, tmp2
-       orr     data2a, data2, tmp2
-       csinv   data1, data1, xzr, le
-       csel    data2, data2, data2a, le
-       b       .Lrealigned
 SYM_FUNC_END_PI(strlen)
 EXPORT_SYMBOL_NOKASAN(strlen)
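
The strlen header comment above distinguishes a fast NUL check, (X - 1) & 0x80, from the accurate one used on the slow path. This standalone C sketch (not kernel code; REP8_* mirror the constants above) shows the difference: the fast form never misses a zero byte but reports a false hit on the word containing non-ASCII bytes, while the accurate form does not.

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  #define REP8_01 0x0101010101010101ULL
  #define REP8_80 0x8080808080808080ULL

  static int fast_check(uint64_t x)      /* may fire on bytes 0x81..0xff */
  {
  	return ((x - REP8_01) & REP8_80) != 0;
  }

  static int accurate_check(uint64_t x)  /* non-zero iff a byte of x is 0 */
  {
  	return ((x - REP8_01) & ~x & REP8_80) != 0;
  }

  int main(void)
  {
  	uint64_t ascii, non_ascii;

  	memcpy(&ascii, "abcdefgh", 8);              /* ASCII only, no NUL */
  	memcpy(&non_ascii, "abc\xc3\xa9" "fgh", 8); /* contains 0xc3, 0xa9 */
  	printf("ascii:     fast=%d accurate=%d\n",
  	       fast_check(ascii), accurate_check(ascii));
  	printf("non-ascii: fast=%d accurate=%d\n",
  	       fast_check(non_ascii), accurate_check(non_ascii));
  	return 0;
  }
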
index 2a7ee94..48d44f7 100644 (file)
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2013-2021, Arm Limited.
  *
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/e823e3abf5f89ecb/string/aarch64/strncmp.S
  */
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
-/*
- * compare two strings
+/* Assumptions:
  *
- * Parameters:
- *  x0 - const string 1 pointer
- *  x1 - const string 2 pointer
- *  x2 - the maximal length to be compared
- * Returns:
- *  x0 - an integer less than, equal to, or greater than zero if s1 is found,
- *     respectively, to be less than, to match, or be greater than s2.
+ * ARMv8-a, AArch64
  */
 
+#define L(label) .L ## label
+
 #define REP8_01 0x0101010101010101
 #define REP8_7f 0x7f7f7f7f7f7f7f7f
 #define REP8_80 0x8080808080808080
 
 /* Parameters and result.  */
-src1           .req    x0
-src2           .req    x1
-limit          .req    x2
-result         .req    x0
+#define src1           x0
+#define src2           x1
+#define limit          x2
+#define result         x0
 
 /* Internal variables.  */
-data1          .req    x3
-data1w         .req    w3
-data2          .req    x4
-data2w         .req    w4
-has_nul                .req    x5
-diff           .req    x6
-syndrome       .req    x7
-tmp1           .req    x8
-tmp2           .req    x9
-tmp3           .req    x10
-zeroones       .req    x11
-pos            .req    x12
-limit_wd       .req    x13
-mask           .req    x14
-endloop                .req    x15
+#define data1          x3
+#define data1w         w3
+#define data2          x4
+#define data2w         w4
+#define has_nul                x5
+#define diff           x6
+#define syndrome       x7
+#define tmp1           x8
+#define tmp2           x9
+#define tmp3           x10
+#define zeroones       x11
+#define pos            x12
+#define limit_wd       x13
+#define mask           x14
+#define endloop                x15
+#define count          mask
 
 SYM_FUNC_START_WEAK_PI(strncmp)
-       cbz     limit, .Lret0
+       cbz     limit, L(ret0)
        eor     tmp1, src1, src2
        mov     zeroones, #REP8_01
        tst     tmp1, #7
-       b.ne    .Lmisaligned8
-       ands    tmp1, src1, #7
-       b.ne    .Lmutual_align
+       and     count, src1, #7
+       b.ne    L(misaligned8)
+       cbnz    count, L(mutual_align)
        /* Calculate the number of full and partial words -1.  */
-       /*
-       * when limit is mulitply of 8, if not sub 1,
-       * the judgement of last dword will wrong.
-       */
-       sub     limit_wd, limit, #1 /* limit != 0, so no underflow.  */
-       lsr     limit_wd, limit_wd, #3  /* Convert to Dwords.  */
+       sub     limit_wd, limit, #1     /* limit != 0, so no underflow.  */
+       lsr     limit_wd, limit_wd, #3  /* Convert to Dwords.  */
 
-       /*
-       * NUL detection works on the principle that (X - 1) & (~X) & 0x80
-       * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
-       * can be done in parallel across the entire word.
-       */
-.Lloop_aligned:
+       /* NUL detection works on the principle that (X - 1) & (~X) & 0x80
+          (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+          can be done in parallel across the entire word.  */
+       .p2align 4
+L(loop_aligned):
        ldr     data1, [src1], #8
        ldr     data2, [src2], #8
-.Lstart_realigned:
+L(start_realigned):
        subs    limit_wd, limit_wd, #1
        sub     tmp1, data1, zeroones
        orr     tmp2, data1, #REP8_7f
-       eor     diff, data1, data2  /* Non-zero if differences found.  */
-       csinv   endloop, diff, xzr, pl  /* Last Dword or differences.*/
-       bics    has_nul, tmp1, tmp2 /* Non-zero if NUL terminator.  */
+       eor     diff, data1, data2      /* Non-zero if differences found.  */
+       csinv   endloop, diff, xzr, pl  /* Last Dword or differences.  */
+       bics    has_nul, tmp1, tmp2     /* Non-zero if NUL terminator.  */
        ccmp    endloop, #0, #0, eq
-       b.eq    .Lloop_aligned
+       b.eq    L(loop_aligned)
+       /* End of main loop */
 
-       /*Not reached the limit, must have found the end or a diff.  */
-       tbz     limit_wd, #63, .Lnot_limit
+       /* Not reached the limit, must have found the end or a diff.  */
+       tbz     limit_wd, #63, L(not_limit)
 
        /* Limit % 8 == 0 => all bytes significant.  */
        ands    limit, limit, #7
-       b.eq    .Lnot_limit
+       b.eq    L(not_limit)
 
-       lsl     limit, limit, #3    /* Bits -> bytes.  */
+       lsl     limit, limit, #3        /* Bits -> bytes.  */
        mov     mask, #~0
-CPU_BE( lsr    mask, mask, limit )
-CPU_LE( lsl    mask, mask, limit )
+#ifdef __AARCH64EB__
+       lsr     mask, mask, limit
+#else
+       lsl     mask, mask, limit
+#endif
        bic     data1, data1, mask
        bic     data2, data2, mask
 
        /* Make sure that the NUL byte is marked in the syndrome.  */
        orr     has_nul, has_nul, mask
 
-.Lnot_limit:
+L(not_limit):
        orr     syndrome, diff, has_nul
-       b       .Lcal_cmpresult
 
-.Lmutual_align:
-       /*
-       * Sources are mutually aligned, but are not currently at an
-       * alignment boundary.  Round down the addresses and then mask off
-       * the bytes that precede the start point.
-       * We also need to adjust the limit calculations, but without
-       * overflowing if the limit is near ULONG_MAX.
-       */
+#ifndef        __AARCH64EB__
+       rev     syndrome, syndrome
+       rev     data1, data1
+       /* The MS-non-zero bit of the syndrome marks either the first bit
+          that is different, or the top bit of the first zero byte.
+          Shifting left now will bring the critical information into the
+          top bits.  */
+       clz     pos, syndrome
+       rev     data2, data2
+       lsl     data1, data1, pos
+       lsl     data2, data2, pos
+       /* But we need to zero-extend (char is unsigned) the value and then
+          perform a signed 32-bit subtraction.  */
+       lsr     data1, data1, #56
+       sub     result, data1, data2, lsr #56
+       ret
+#else
+       /* For big-endian we cannot use the trick with the syndrome value
+          as carry-propagation can corrupt the upper bits if the trailing
+          bytes in the string contain 0x01.  */
+       /* However, if there is no NUL byte in the dword, we can generate
+          the result directly.  We can't just subtract the bytes as the
+          MSB might be significant.  */
+       cbnz    has_nul, 1f
+       cmp     data1, data2
+       cset    result, ne
+       cneg    result, result, lo
+       ret
+1:
+       /* Re-compute the NUL-byte detection, using a byte-reversed value.  */
+       rev     tmp3, data1
+       sub     tmp1, tmp3, zeroones
+       orr     tmp2, tmp3, #REP8_7f
+       bic     has_nul, tmp1, tmp2
+       rev     has_nul, has_nul
+       orr     syndrome, diff, has_nul
+       clz     pos, syndrome
+       /* The MS-non-zero bit of the syndrome marks either the first bit
+          that is different, or the top bit of the first zero byte.
+          Shifting left now will bring the critical information into the
+          top bits.  */
+       lsl     data1, data1, pos
+       lsl     data2, data2, pos
+       /* But we need to zero-extend (char is unsigned) the value and then
+          perform a signed 32-bit subtraction.  */
+       lsr     data1, data1, #56
+       sub     result, data1, data2, lsr #56
+       ret
+#endif
+
+L(mutual_align):
+       /* Sources are mutually aligned, but are not currently at an
+          alignment boundary.  Round down the addresses and then mask off
+          the bytes that precede the start point.
+          We also need to adjust the limit calculations, but without
+          overflowing if the limit is near ULONG_MAX.  */
        bic     src1, src1, #7
        bic     src2, src2, #7
        ldr     data1, [src1], #8
-       neg     tmp3, tmp1, lsl #3  /* 64 - bits(bytes beyond align). */
+       neg     tmp3, count, lsl #3     /* 64 - bits(bytes beyond align). */
        ldr     data2, [src2], #8
        mov     tmp2, #~0
-       sub     limit_wd, limit, #1 /* limit != 0, so no underflow.  */
+       sub     limit_wd, limit, #1     /* limit != 0, so no underflow.  */
+#ifdef __AARCH64EB__
        /* Big-endian.  Early bytes are at MSB.  */
-CPU_BE( lsl    tmp2, tmp2, tmp3 )      /* Shift (tmp1 & 63).  */
+       lsl     tmp2, tmp2, tmp3        /* Shift (count & 63).  */
+#else
        /* Little-endian.  Early bytes are at LSB.  */
-CPU_LE( lsr    tmp2, tmp2, tmp3 )      /* Shift (tmp1 & 63).  */
-
+       lsr     tmp2, tmp2, tmp3        /* Shift (count & 63).  */
+#endif
        and     tmp3, limit_wd, #7
        lsr     limit_wd, limit_wd, #3
-       /* Adjust the limit. Only low 3 bits used, so overflow irrelevant.*/
-       add     limit, limit, tmp1
-       add     tmp3, tmp3, tmp1
+       /* Adjust the limit. Only low 3 bits used, so overflow irrelevant.  */
+       add     limit, limit, count
+       add     tmp3, tmp3, count
        orr     data1, data1, tmp2
        orr     data2, data2, tmp2
        add     limit_wd, limit_wd, tmp3, lsr #3
-       b       .Lstart_realigned
+       b       L(start_realigned)
+
+       .p2align 4
+       /* Don't bother with dwords for up to 16 bytes.  */
+L(misaligned8):
+       cmp     limit, #16
+       b.hs    L(try_misaligned_words)
 
-/*when src1 offset is not equal to src2 offset...*/
-.Lmisaligned8:
-       cmp     limit, #8
-       b.lo    .Ltiny8proc /*limit < 8... */
-       /*
-       * Get the align offset length to compare per byte first.
-       * After this process, one string's address will be aligned.*/
-       and     tmp1, src1, #7
-       neg     tmp1, tmp1
-       add     tmp1, tmp1, #8
-       and     tmp2, src2, #7
-       neg     tmp2, tmp2
-       add     tmp2, tmp2, #8
-       subs    tmp3, tmp1, tmp2
-       csel    pos, tmp1, tmp2, hi /*Choose the maximum. */
-       /*
-       * Here, limit is not less than 8, so directly run .Ltinycmp
-       * without checking the limit.*/
-       sub     limit, limit, pos
-.Ltinycmp:
+L(byte_loop):
+       /* Perhaps we can do better than this.  */
        ldrb    data1w, [src1], #1
        ldrb    data2w, [src2], #1
-       subs    pos, pos, #1
-       ccmp    data1w, #1, #0, ne  /* NZCV = 0b0000.  */
-       ccmp    data1w, data2w, #0, cs  /* NZCV = 0b0000.  */
-       b.eq    .Ltinycmp
-       cbnz    pos, 1f /*find the null or unequal...*/
-       cmp     data1w, #1
-       ccmp    data1w, data2w, #0, cs
-       b.eq    .Lstart_align /*the last bytes are equal....*/
-1:
+       subs    limit, limit, #1
+       ccmp    data1w, #1, #0, hi      /* NZCV = 0b0000.  */
+       ccmp    data1w, data2w, #0, cs  /* NZCV = 0b0000.  */
+       b.eq    L(byte_loop)
+L(done):
        sub     result, data1, data2
        ret
-
-.Lstart_align:
+       /* Align the SRC1 to a dword by doing a bytewise compare and then do
+          the dword loop.  */
+L(try_misaligned_words):
        lsr     limit_wd, limit, #3
-       cbz     limit_wd, .Lremain8
-       /*process more leading bytes to make str1 aligned...*/
-       ands    xzr, src1, #7
-       b.eq    .Lrecal_offset
-       add     src1, src1, tmp3        /*tmp3 is positive in this branch.*/
-       add     src2, src2, tmp3
-       ldr     data1, [src1], #8
-       ldr     data2, [src2], #8
+       cbz     count, L(do_misaligned)
 
-       sub     limit, limit, tmp3
+       neg     count, count
+       and     count, count, #7
+       sub     limit, limit, count
        lsr     limit_wd, limit, #3
-       subs    limit_wd, limit_wd, #1
 
-       sub     tmp1, data1, zeroones
-       orr     tmp2, data1, #REP8_7f
-       eor     diff, data1, data2  /* Non-zero if differences found.  */
-       csinv   endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
-       bics    has_nul, tmp1, tmp2
-       ccmp    endloop, #0, #0, eq /*has_null is ZERO: no null byte*/
-       b.ne    .Lunequal_proc
-       /*How far is the current str2 from the alignment boundary...*/
-       and     tmp3, tmp3, #7
-.Lrecal_offset:
-       neg     pos, tmp3
-.Lloopcmp_proc:
-       /*
-       * Divide the eight bytes into two parts. First,backwards the src2
-       * to an alignment boundary,load eight bytes from the SRC2 alignment
-       * boundary,then compare with the relative bytes from SRC1.
-       * If all 8 bytes are equal,then start the second part's comparison.
-       * Otherwise finish the comparison.
-       * This special handle can garantee all the accesses are in the
-       * thread/task space in avoid to overrange access.
-       */
-       ldr     data1, [src1,pos]
-       ldr     data2, [src2,pos]
-       sub     tmp1, data1, zeroones
-       orr     tmp2, data1, #REP8_7f
-       bics    has_nul, tmp1, tmp2 /* Non-zero if NUL terminator.  */
-       eor     diff, data1, data2  /* Non-zero if differences found.  */
-       csinv   endloop, diff, xzr, eq
-       cbnz    endloop, .Lunequal_proc
+L(page_end_loop):
+       ldrb    data1w, [src1], #1
+       ldrb    data2w, [src2], #1
+       cmp     data1w, #1
+       ccmp    data1w, data2w, #0, cs  /* NZCV = 0b0000.  */
+       b.ne    L(done)
+       subs    count, count, #1
+       b.hi    L(page_end_loop)
+
+L(do_misaligned):
+       /* Prepare ourselves for the next page crossing.  Unlike the aligned
+          loop, we fetch 1 less dword because we risk crossing bounds on
+          SRC2.  */
+       mov     count, #8
+       subs    limit_wd, limit_wd, #1
+       b.lo    L(done_loop)
+L(loop_misaligned):
+       and     tmp2, src2, #0xff8
+       eor     tmp2, tmp2, #0xff8
+       cbz     tmp2, L(page_end_loop)
 
-       /*The second part process*/
        ldr     data1, [src1], #8
        ldr     data2, [src2], #8
-       subs    limit_wd, limit_wd, #1
        sub     tmp1, data1, zeroones
        orr     tmp2, data1, #REP8_7f
-       eor     diff, data1, data2  /* Non-zero if differences found.  */
-       csinv   endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
-       bics    has_nul, tmp1, tmp2
-       ccmp    endloop, #0, #0, eq /*has_null is ZERO: no null byte*/
-       b.eq    .Lloopcmp_proc
-
-.Lunequal_proc:
-       orr     syndrome, diff, has_nul
-       cbz     syndrome, .Lremain8
-.Lcal_cmpresult:
-       /*
-       * reversed the byte-order as big-endian,then CLZ can find the most
-       * significant zero bits.
-       */
-CPU_LE( rev    syndrome, syndrome )
-CPU_LE( rev    data1, data1 )
-CPU_LE( rev    data2, data2 )
-       /*
-       * For big-endian we cannot use the trick with the syndrome value
-       * as carry-propagation can corrupt the upper bits if the trailing
-       * bytes in the string contain 0x01.
-       * However, if there is no NUL byte in the dword, we can generate
-       * the result directly.  We can't just subtract the bytes as the
-       * MSB might be significant.
-       */
-CPU_BE( cbnz   has_nul, 1f )
-CPU_BE( cmp    data1, data2 )
-CPU_BE( cset   result, ne )
-CPU_BE( cneg   result, result, lo )
-CPU_BE( ret )
-CPU_BE( 1: )
-       /* Re-compute the NUL-byte detection, using a byte-reversed value.*/
-CPU_BE( rev    tmp3, data1 )
-CPU_BE( sub    tmp1, tmp3, zeroones )
-CPU_BE( orr    tmp2, tmp3, #REP8_7f )
-CPU_BE( bic    has_nul, tmp1, tmp2 )
-CPU_BE( rev    has_nul, has_nul )
-CPU_BE( orr    syndrome, diff, has_nul )
-       /*
-       * The MS-non-zero bit of the syndrome marks either the first bit
-       * that is different, or the top bit of the first zero byte.
-       * Shifting left now will bring the critical information into the
-       * top bits.
-       */
-       clz     pos, syndrome
-       lsl     data1, data1, pos
-       lsl     data2, data2, pos
-       /*
-       * But we need to zero-extend (char is unsigned) the value and then
-       * perform a signed 32-bit subtraction.
-       */
-       lsr     data1, data1, #56
-       sub     result, data1, data2, lsr #56
-       ret
-
-.Lremain8:
-       /* Limit % 8 == 0 => all bytes significant.  */
-       ands    limit, limit, #7
-       b.eq    .Lret0
-.Ltiny8proc:
-       ldrb    data1w, [src1], #1
-       ldrb    data2w, [src2], #1
-       subs    limit, limit, #1
+       eor     diff, data1, data2      /* Non-zero if differences found.  */
+       bics    has_nul, tmp1, tmp2     /* Non-zero if NUL terminator.  */
+       ccmp    diff, #0, #0, eq
+       b.ne    L(not_limit)
+       subs    limit_wd, limit_wd, #1
+       b.pl    L(loop_misaligned)
 
-       ccmp    data1w, #1, #0, ne  /* NZCV = 0b0000.  */
-       ccmp    data1w, data2w, #0, cs  /* NZCV = 0b0000.  */
-       b.eq    .Ltiny8proc
-       sub     result, data1, data2
-       ret
+L(done_loop):
+       /* We found a difference or a NUL before the limit was reached.  */
+       and     limit, limit, #7
+       cbz     limit, L(not_limit)
+       /* Read the last word.  */
+       sub     src1, src1, 8
+       sub     src2, src2, 8
+       ldr     data1, [src1, limit]
+       ldr     data2, [src2, limit]
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, #REP8_7f
+       eor     diff, data1, data2      /* Non-zero if differences found.  */
+       bics    has_nul, tmp1, tmp2     /* Non-zero if NUL terminator.  */
+       ccmp    diff, #0, #0, eq
+       b.ne    L(not_limit)
 
-.Lret0:
+L(ret0):
        mov     result, #0
        ret
+
 SYM_FUNC_END_PI(strncmp)
 EXPORT_SYMBOL_NOKASAN(strncmp)
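
The strcmp and strncmp routines above share the same little-endian tail: byte-reverse the syndrome so CLZ finds the first differing or NUL byte, shift that byte to the top of both data words, then subtract the zero-extended bytes. A standalone C sketch of that per-dword step (not kernel code; it assumes a little-endian host and uses the GCC/Clang builtins __builtin_bswap64 and __builtin_clzll in place of the REV and CLZ instructions):

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  #define REP8_01 0x0101010101010101ULL
  #define REP8_7f 0x7f7f7f7f7f7f7f7fULL

  /* Compare one pair of dwords the way the little-endian path above does. */
  static int cmp_dwords_le(uint64_t data1, uint64_t data2)
  {
  	uint64_t has_nul = (data1 - REP8_01) & ~(data1 | REP8_7f);
  	uint64_t syndrome = (data1 ^ data2) | has_nul;
  	int pos;

  	if (!syndrome)
  		return 0;	/* equal so far and no NUL: load the next dword */

  	syndrome = __builtin_bswap64(syndrome);
  	data1 = __builtin_bswap64(data1);
  	data2 = __builtin_bswap64(data2);
  	pos = __builtin_clzll(syndrome);	/* first difference or NUL bit */
  	data1 <<= pos;
  	data2 <<= pos;
  	/* Zero-extend the interesting byte of each word and subtract. */
  	return (int)(data1 >> 56) - (int)(data2 >> 56);
  }

  int main(void)
  {
  	uint64_t a, b;

  	memcpy(&a, "abcdefgh", 8);
  	memcpy(&b, "abcdEfgh", 8);
  	printf("%d\n", cmp_dwords_le(a, b));	/* positive: 'e' > 'E' */
  	return 0;
  }
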
index c83bb5a..baee229 100644 (file)
@@ -15,7 +15,7 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt)
         * barrier to order the cache maintenance against the memcpy.
         */
        memcpy(dst, src, cnt);
-       __clean_dcache_area_pop(dst, cnt);
+       dcache_clean_pop((unsigned long)dst, (unsigned long)dst + cnt);
 }
 EXPORT_SYMBOL_GPL(memcpy_flushcache);
 
@@ -33,6 +33,6 @@ unsigned long __copy_user_flushcache(void *to, const void __user *from,
        rc = raw_copy_from_user(to, from, n);
 
        /* See above */
-       __clean_dcache_area_pop(to, n - rc);
+       dcache_clean_pop((unsigned long)to, (unsigned long)to + n - rc);
        return rc;
 }
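
The two flushcache.c hunks above, like the "add x1, x0, x1" instructions added to the __dma_* entry points later in this section, reflect one interface change: the D-cache maintenance helpers now take a half-open [start, end) virtual address range rather than a (kaddr, size) pair. A minimal sketch of the new calling convention, where range_clean_pop() is a hypothetical stand-in for the renamed kernel helpers such as dcache_clean_pop():

  #include <stddef.h>

  /* Hypothetical stand-in for a by-range maintenance helper. */
  static void range_clean_pop(unsigned long start, unsigned long end)
  {
  	/* ... perform maintenance on [start, end) ... */
  	(void)start;
  	(void)end;
  }

  /* Callers convert a (buffer, length) pair into a [start, end) range. */
  static void clean_buffer(void *buf, size_t len)
  {
  	range_clean_pop((unsigned long)buf, (unsigned long)buf + len);
  }
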
index 2d881f3..5051b3c 100644 (file)
@@ -15,7 +15,7 @@
 #include <asm/asm-uaccess.h>
 
 /*
- *     flush_icache_range(start,end)
+ *     caches_clean_inval_pou_macro(start,end) [fixup]
  *
  *     Ensure that the I and D caches are coherent within specified region.
  *     This is typically used when code has been written to a memory region,
  *
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
+ *     - fixup   - optional label to branch to on user fault
  */
-SYM_FUNC_START(__flush_icache_range)
-       /* FALLTHROUGH */
+.macro caches_clean_inval_pou_macro, fixup
+alternative_if ARM64_HAS_CACHE_IDC
+       dsb     ishst
+       b       .Ldc_skip_\@
+alternative_else_nop_endif
+       mov     x2, x0
+       mov     x3, x1
+       dcache_by_line_op cvau, ish, x2, x3, x4, x5, \fixup
+.Ldc_skip_\@:
+alternative_if ARM64_HAS_CACHE_DIC
+       isb
+       b       .Lic_skip_\@
+alternative_else_nop_endif
+       invalidate_icache_by_line x0, x1, x2, x3, \fixup
+.Lic_skip_\@:
+.endm
 
 /*
- *     __flush_cache_user_range(start,end)
+ *     caches_clean_inval_pou(start,end)
  *
  *     Ensure that the I and D caches are coherent within specified region.
  *     This is typically used when code has been written to a memory region,
@@ -37,117 +52,103 @@ SYM_FUNC_START(__flush_icache_range)
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-SYM_FUNC_START(__flush_cache_user_range)
+SYM_FUNC_START(caches_clean_inval_pou)
+       caches_clean_inval_pou_macro
+       ret
+SYM_FUNC_END(caches_clean_inval_pou)
+
+/*
+ *     caches_clean_inval_user_pou(start,end)
+ *
+ *     Ensure that the I and D caches are coherent within specified region.
+ *     This is typically used when code has been written to a memory region,
+ *     and will be executed.
+ *
+ *     - start   - virtual start address of region
+ *     - end     - virtual end address of region
+ */
+SYM_FUNC_START(caches_clean_inval_user_pou)
        uaccess_ttbr0_enable x2, x3, x4
-alternative_if ARM64_HAS_CACHE_IDC
-       dsb     ishst
-       b       7f
-alternative_else_nop_endif
-       dcache_line_size x2, x3
-       sub     x3, x2, #1
-       bic     x4, x0, x3
-1:
-user_alt 9f, "dc cvau, x4",  "dc civac, x4",  ARM64_WORKAROUND_CLEAN_CACHE
-       add     x4, x4, x2
-       cmp     x4, x1
-       b.lo    1b
-       dsb     ish
 
-7:
-alternative_if ARM64_HAS_CACHE_DIC
-       isb
-       b       8f
-alternative_else_nop_endif
-       invalidate_icache_by_line x0, x1, x2, x3, 9f
-8:     mov     x0, #0
+       caches_clean_inval_pou_macro 2f
+       mov     x0, xzr
 1:
        uaccess_ttbr0_disable x1, x2
        ret
-9:
+2:
        mov     x0, #-EFAULT
        b       1b
-SYM_FUNC_END(__flush_icache_range)
-SYM_FUNC_END(__flush_cache_user_range)
+SYM_FUNC_END(caches_clean_inval_user_pou)
 
 /*
- *     invalidate_icache_range(start,end)
+ *     icache_inval_pou(start,end)
  *
  *     Ensure that the I cache is invalid within specified region.
  *
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-SYM_FUNC_START(invalidate_icache_range)
+SYM_FUNC_START(icache_inval_pou)
 alternative_if ARM64_HAS_CACHE_DIC
-       mov     x0, xzr
        isb
        ret
 alternative_else_nop_endif
 
-       uaccess_ttbr0_enable x2, x3, x4
-
-       invalidate_icache_by_line x0, x1, x2, x3, 2f
-       mov     x0, xzr
-1:
-       uaccess_ttbr0_disable x1, x2
+       invalidate_icache_by_line x0, x1, x2, x3
        ret
-2:
-       mov     x0, #-EFAULT
-       b       1b
-SYM_FUNC_END(invalidate_icache_range)
+SYM_FUNC_END(icache_inval_pou)
 
 /*
- *     __flush_dcache_area(kaddr, size)
+ *     dcache_clean_inval_poc(start, end)
  *
- *     Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *     Ensure that any D-cache lines for the interval [start, end)
  *     are cleaned and invalidated to the PoC.
  *
- *     - kaddr   - kernel address
- *     - size    - size in question
+ *     - start   - virtual start address of region
+ *     - end     - virtual end address of region
  */
-SYM_FUNC_START_PI(__flush_dcache_area)
+SYM_FUNC_START_PI(dcache_clean_inval_poc)
        dcache_by_line_op civac, sy, x0, x1, x2, x3
        ret
-SYM_FUNC_END_PI(__flush_dcache_area)
+SYM_FUNC_END_PI(dcache_clean_inval_poc)
 
 /*
- *     __clean_dcache_area_pou(kaddr, size)
+ *     dcache_clean_pou(start, end)
  *
- *     Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *     Ensure that any D-cache lines for the interval [start, end)
  *     are cleaned to the PoU.
  *
- *     - kaddr   - kernel address
- *     - size    - size in question
+ *     - start   - virtual start address of region
+ *     - end     - virtual end address of region
  */
-SYM_FUNC_START(__clean_dcache_area_pou)
+SYM_FUNC_START(dcache_clean_pou)
 alternative_if ARM64_HAS_CACHE_IDC
        dsb     ishst
        ret
 alternative_else_nop_endif
        dcache_by_line_op cvau, ish, x0, x1, x2, x3
        ret
-SYM_FUNC_END(__clean_dcache_area_pou)
+SYM_FUNC_END(dcache_clean_pou)
 
 /*
- *     __inval_dcache_area(kaddr, size)
+ *     dcache_inval_poc(start, end)
  *
- *     Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *     Ensure that any D-cache lines for the interval [start, end)
  *     are invalidated. Any partial lines at the ends of the interval are
  *     also cleaned to PoC to prevent data loss.
  *
- *     - kaddr   - kernel address
- *     - size    - size in question
+ *     - start   - kernel start address of region
+ *     - end     - kernel end address of region
  */
 SYM_FUNC_START_LOCAL(__dma_inv_area)
-SYM_FUNC_START_PI(__inval_dcache_area)
+SYM_FUNC_START_PI(dcache_inval_poc)
        /* FALLTHROUGH */
 
 /*
- *     __dma_inv_area(start, size)
+ *     __dma_inv_area(start, end)
  *     - start   - virtual start address of region
- *     - size    - size in question
+ *     - end     - virtual end address of region
  */
-       add     x1, x1, x0
        dcache_line_size x2, x3
        sub     x3, x2, #1
        tst     x1, x3                          // end cache line aligned?
@@ -165,48 +166,48 @@ SYM_FUNC_START_PI(__inval_dcache_area)
        b.lo    2b
        dsb     sy
        ret
-SYM_FUNC_END_PI(__inval_dcache_area)
+SYM_FUNC_END_PI(dcache_inval_poc)
 SYM_FUNC_END(__dma_inv_area)
 
 /*
- *     __clean_dcache_area_poc(kaddr, size)
+ *     dcache_clean_poc(start, end)
  *
- *     Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *     Ensure that any D-cache lines for the interval [start, end)
  *     are cleaned to the PoC.
  *
- *     - kaddr   - kernel address
- *     - size    - size in question
+ *     - start   - virtual start address of region
+ *     - end     - virtual end address of region
  */
 SYM_FUNC_START_LOCAL(__dma_clean_area)
-SYM_FUNC_START_PI(__clean_dcache_area_poc)
+SYM_FUNC_START_PI(dcache_clean_poc)
        /* FALLTHROUGH */
 
 /*
- *     __dma_clean_area(start, size)
+ *     __dma_clean_area(start, end)
  *     - start   - virtual start address of region
- *     - size    - size in question
+ *     - end     - virtual end address of region
  */
        dcache_by_line_op cvac, sy, x0, x1, x2, x3
        ret
-SYM_FUNC_END_PI(__clean_dcache_area_poc)
+SYM_FUNC_END_PI(dcache_clean_poc)
 SYM_FUNC_END(__dma_clean_area)
 
 /*
- *     __clean_dcache_area_pop(kaddr, size)
+ *     dcache_clean_pop(start, end)
  *
- *     Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *     Ensure that any D-cache lines for the interval [start, end)
  *     are cleaned to the PoP.
  *
- *     - kaddr   - kernel address
- *     - size    - size in question
+ *     - start   - virtual start address of region
+ *     - end     - virtual end address of region
  */
-SYM_FUNC_START_PI(__clean_dcache_area_pop)
+SYM_FUNC_START_PI(dcache_clean_pop)
        alternative_if_not ARM64_HAS_DCPOP
-       b       __clean_dcache_area_poc
+       b       dcache_clean_poc
        alternative_else_nop_endif
        dcache_by_line_op cvap, sy, x0, x1, x2, x3
        ret
-SYM_FUNC_END_PI(__clean_dcache_area_pop)
+SYM_FUNC_END_PI(dcache_clean_pop)
 
 /*
  *     __dma_flush_area(start, size)
@@ -217,6 +218,7 @@ SYM_FUNC_END_PI(__clean_dcache_area_pop)
  *     - size    - size in question
  */
 SYM_FUNC_START_PI(__dma_flush_area)
+       add     x1, x0, x1
        dcache_by_line_op civac, sy, x0, x1, x2, x3
        ret
 SYM_FUNC_END_PI(__dma_flush_area)
@@ -228,6 +230,7 @@ SYM_FUNC_END_PI(__dma_flush_area)
  *     - dir   - DMA direction
  */
 SYM_FUNC_START_PI(__dma_map_area)
+       add     x1, x0, x1
        cmp     w2, #DMA_FROM_DEVICE
        b.eq    __dma_inv_area
        b       __dma_clean_area
@@ -240,6 +243,7 @@ SYM_FUNC_END_PI(__dma_map_area)
  *     - dir   - DMA direction
  */
 SYM_FUNC_START_PI(__dma_unmap_area)
+       add     x1, x0, x1
        cmp     w2, #DMA_TO_DEVICE
        b.ne    __dma_inv_area
        ret
index 001737a..cd72576 100644 (file)
@@ -402,14 +402,12 @@ static int asids_init(void)
 {
        asid_bits = get_cpu_asid_bits();
        atomic64_set(&asid_generation, ASID_FIRST_VERSION);
-       asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
-                          GFP_KERNEL);
+       asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
        if (!asid_map)
                panic("Failed to allocate bitmap for %lu ASIDs\n",
                      NUM_USER_ASIDS);
 
-       pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS),
-                                 sizeof(*pinned_asid_map), GFP_KERNEL);
+       pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
        nr_pinned_asids = 0;
 
        /*
index 871c82a..349c488 100644 (file)
@@ -99,6 +99,8 @@ static void mem_abort_decode(unsigned int esr)
        pr_alert("  EA = %lu, S1PTW = %lu\n",
                 (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
                 (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);
+       pr_alert("  FSC = 0x%02x: %s\n", (esr & ESR_ELx_FSC),
+                esr_to_fault_info(esr)->name);
 
        if (esr_is_data_abort(esr))
                data_abort_decode(esr);
@@ -232,13 +234,17 @@ static bool is_el1_instruction_abort(unsigned int esr)
        return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
 }
 
+static bool is_el1_data_abort(unsigned int esr)
+{
+       return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR;
+}
+
 static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
                                           struct pt_regs *regs)
 {
-       unsigned int ec       = ESR_ELx_EC(esr);
        unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
 
-       if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
+       if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr))
                return false;
 
        if (fsc_type == ESR_ELx_FSC_PERM)
@@ -258,7 +264,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
        unsigned long flags;
        u64 par, dfsc;
 
-       if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR ||
+       if (!is_el1_data_abort(esr) ||
            (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
                return false;
 
@@ -346,10 +352,9 @@ static void do_tag_recovery(unsigned long addr, unsigned int esr,
 
 static bool is_el1_mte_sync_tag_check_fault(unsigned int esr)
 {
-       unsigned int ec = ESR_ELx_EC(esr);
        unsigned int fsc = esr & ESR_ELx_FSC;
 
-       if (ec != ESR_ELx_EC_DABT_CUR)
+       if (!is_el1_data_abort(esr))
                return false;
 
        if (fsc == ESR_ELx_FSC_MTE)
@@ -504,7 +509,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
         */
        if (!(vma->vm_flags & vm_flags))
                return VM_FAULT_BADACCESS;
-       return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, regs);
+       return handle_mm_fault(vma, addr, mm_flags, regs);
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)
@@ -836,13 +841,6 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(do_mem_abort);
 
-void do_el0_irq_bp_hardening(void)
-{
-       /* PC has already been checked in entry.S */
-       arm64_apply_bp_hardening();
-}
-NOKPROBE_SYMBOL(do_el0_irq_bp_hardening);
-
 void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 {
        arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN,
@@ -921,3 +919,29 @@ void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
        debug_exception_exit(regs);
 }
 NOKPROBE_SYMBOL(do_debug_exception);
+
+/*
+ * Used during anonymous page fault handling.
+ */
+struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+                                               unsigned long vaddr)
+{
+       gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;
+
+       /*
+        * If the page is mapped with PROT_MTE, initialise the tags at the
+        * point of allocation and page zeroing as this is usually faster than
+        * separate DC ZVA and STGM.
+        */
+       if (vma->vm_flags & VM_MTE)
+               flags |= __GFP_ZEROTAGS;
+
+       return alloc_page_vma(flags, vma, vaddr);
+}
+
+void tag_clear_highpage(struct page *page)
+{
+       mte_zero_clear_page_tags(page_address(page));
+       page_kasan_tag_reset(page);
+       set_bit(PG_mte_tagged, &page->flags);
+}
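
The first fault.c hunk earlier in this file's diff adds an FSC line to mem_abort_decode(): the fault status code is the low six bits of ESR_ELx and indexes a table of human-readable fault names via esr_to_fault_info(). A condensed sketch of that lookup; the table here is abbreviated and the names ESR_ELx_FSC_MASK, fault_names and fsc_name() are illustrative, not the kernel's fault_info[] machinery:

  #define ESR_ELx_FSC_MASK	0x3f

  struct fault_name {
  	const char *name;
  };

  /* Two example entries only; the real table covers all 64 FSC encodings. */
  static const struct fault_name fault_names[0x40] = {
  	[0x04] = { "level 0 translation fault" },
  	[0x07] = { "level 3 translation fault" },
  	/* ... */
  };

  static const char *fsc_name(unsigned int esr)
  {
  	const char *name = fault_names[esr & ESR_ELx_FSC_MASK].name;

  	return name ? name : "unknown";
  }
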
index 6d44c02..2aaf950 100644 (file)
 #include <asm/cache.h>
 #include <asm/tlbflush.h>
 
-void sync_icache_aliases(void *kaddr, unsigned long len)
+void sync_icache_aliases(unsigned long start, unsigned long end)
 {
-       unsigned long addr = (unsigned long)kaddr;
-
        if (icache_is_aliasing()) {
-               __clean_dcache_area_pou(kaddr, len);
-               __flush_icache_all();
+               dcache_clean_pou(start, end);
+               icache_inval_all_pou();
        } else {
                /*
                 * Don't issue kick_all_cpus_sync() after I-cache invalidation
                 * for user mappings.
                 */
-               __flush_icache_range(addr, addr + len);
+               caches_clean_inval_pou(start, end);
        }
 }
 
-static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-                               unsigned long uaddr, void *kaddr,
-                               unsigned long len)
+static void flush_ptrace_access(struct vm_area_struct *vma, unsigned long start,
+                               unsigned long end)
 {
        if (vma->vm_flags & VM_EXEC)
-               sync_icache_aliases(kaddr, len);
+               sync_icache_aliases(start, end);
 }
 
 /*
@@ -48,7 +45,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long len)
 {
        memcpy(dst, src, len);
-       flush_ptrace_access(vma, page, uaddr, dst, len);
+       flush_ptrace_access(vma, (unsigned long)dst, (unsigned long)dst + len);
 }
 
 void __sync_icache_dcache(pte_t pte)
@@ -56,7 +53,9 @@ void __sync_icache_dcache(pte_t pte)
        struct page *page = pte_page(pte);
 
        if (!test_bit(PG_dcache_clean, &page->flags)) {
-               sync_icache_aliases(page_address(page), page_size(page));
+               sync_icache_aliases((unsigned long)page_address(page),
+                                   (unsigned long)page_address(page) +
+                                           page_size(page));
                set_bit(PG_dcache_clean, &page->flags);
        }
 }
@@ -77,20 +76,20 @@ EXPORT_SYMBOL(flush_dcache_page);
 /*
  * Additional functions defined in assembly.
  */
-EXPORT_SYMBOL(__flush_icache_range);
+EXPORT_SYMBOL(caches_clean_inval_pou);
 
 #ifdef CONFIG_ARCH_HAS_PMEM_API
 void arch_wb_cache_pmem(void *addr, size_t size)
 {
        /* Ensure order against any prior non-cacheable writes */
        dmb(osh);
-       __clean_dcache_area_pop(addr, size);
+       dcache_clean_pop((unsigned long)addr, (unsigned long)addr + size);
 }
 EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
 
 void arch_invalidate_pmem(void *addr, size_t size)
 {
-       __inval_dcache_area(addr, size);
+       dcache_inval_poc((unsigned long)addr, (unsigned long)addr + size);
 }
 EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
 #endif
index e55409c..6e1ca04 100644 (file)
@@ -499,6 +499,13 @@ void __init mem_init(void)
        BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
 #endif
 
+       /*
+        * Selected page table levels should match when derived from
+        * scratch using the virtual address range and page size.
+        */
+       BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
+                    CONFIG_PGTABLE_LEVELS);
+
        if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
                extern int sysctl_overcommit_memory;
                /*
index 89b66ef..0b28cc2 100644 (file)
@@ -228,7 +228,7 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
                next = pmd_addr_end(addr, end);
 
                /* try section mapping first */
-               if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+               if (((addr | next | phys) & ~PMD_MASK) == 0 &&
                    (flags & NO_BLOCK_MAPPINGS) == 0) {
                        pmd_set_huge(pmdp, phys, prot);
 
@@ -1114,14 +1114,14 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
 }
 #endif
 
-#if !ARM64_SWAPPER_USES_SECTION_MAPS
+#if !ARM64_KERNEL_USES_PMD_MAPS
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                struct vmem_altmap *altmap)
 {
        WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
        return vmemmap_populate_basepages(start, end, node, altmap);
 }
-#else  /* !ARM64_SWAPPER_USES_SECTION_MAPS */
+#else  /* !ARM64_KERNEL_USES_PMD_MAPS */
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                struct vmem_altmap *altmap)
 {
@@ -1166,17 +1166,18 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 
        return 0;
 }
-#endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */
+#endif /* !ARM64_KERNEL_USES_PMD_MAPS */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
 void vmemmap_free(unsigned long start, unsigned long end,
                struct vmem_altmap *altmap)
 {
-#ifdef CONFIG_MEMORY_HOTPLUG
        WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
 
        unmap_hotplug_range(start, end, true, altmap);
        free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
-#endif
 }
+#endif /* CONFIG_MEMORY_HOTPLUG */
 
 static inline pud_t *fixmap_pud(unsigned long addr)
 {
index 97d7bcd..35936c5 100644 (file)
 #endif
 
 #ifdef CONFIG_KASAN_HW_TAGS
-#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
+#define TCR_MTE_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
 #else
-#define TCR_KASAN_HW_FLAGS 0
+/*
+ * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
+ * TBI being enabled at EL1.
+ */
+#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
 #endif
 
 /*
 #define MAIR_EL1_SET                                                   \
        (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |      \
         MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |        \
-        MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) |            \
         MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |              \
         MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |                    \
-        MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) |              \
         MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))
 
 #ifdef CONFIG_CPU_PM
@@ -83,11 +85,7 @@ SYM_FUNC_START(cpu_do_suspend)
        mrs     x9, mdscr_el1
        mrs     x10, oslsr_el1
        mrs     x11, sctlr_el1
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-       mrs     x12, tpidr_el1
-alternative_else
-       mrs     x12, tpidr_el2
-alternative_endif
+       get_this_cpu_offset x12
        mrs     x13, sp_el0
        stp     x2, x3, [x0]
        stp     x4, x5, [x0, #16]
@@ -145,11 +143,7 @@ SYM_FUNC_START(cpu_do_resume)
        msr     mdscr_el1, x10
 
        msr     sctlr_el1, x12
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-       msr     tpidr_el1, x13
-alternative_else
-       msr     tpidr_el2, x13
-alternative_endif
+       set_this_cpu_offset x13
        msr     sp_el0, x14
        /*
         * Restore oslsr_el1 by writing oslar_el1
@@ -464,7 +458,7 @@ SYM_FUNC_START(__cpu_setup)
        msr_s   SYS_TFSRE0_EL1, xzr
 
        /* set the TCR_EL1 bits */
-       mov_q   x10, TCR_KASAN_HW_FLAGS
+       mov_q   x10, TCR_MTE_FLAGS
        orr     tcr, tcr, x10
 1:
 #endif
index a1937df..1c40353 100644 (file)
@@ -159,10 +159,6 @@ static const struct prot_bits pte_bits[] = {
                .set    = "DEVICE/nGnRE",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
-               .val    = PTE_ATTRINDX(MT_DEVICE_GRE),
-               .set    = "DEVICE/GRE",
-       }, {
-               .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_NORMAL_NC),
                .set    = "MEM/NORMAL-NC",
        }, {
index f7b1948..dd5000d 100644
@@ -16,6 +16,7 @@
 #include <asm/byteorder.h>
 #include <asm/cacheflush.h>
 #include <asm/debug-monitors.h>
+#include <asm/insn.h>
 #include <asm/set_memory.h>
 
 #include "bpf_jit.h"
index 21fbdda..49305c2 100644
@@ -3,7 +3,8 @@
 # Internal CPU capabilities constants, keep this list sorted
 
 BTI
-HAS_32BIT_EL0
+# Unreliable: use system_supports_32bit_el0() instead.
+HAS_32BIT_EL0_DO_NOT_USE
 HAS_32BIT_EL1
 HAS_ADDRESS_AUTH
 HAS_ADDRESS_AUTH_ARCH
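
Not part of the patch: a minimal sketch of the usage the new comment points at. The wrapper function is invented for illustration; the only real interface used is system_supports_32bit_el0(), which the comment itself names.

    #include <linux/types.h>
    #include <asm/cpufeature.h>

    /* Ask the accessor, which also covers the mismatched-CPU case, rather
     * than testing the renamed capability bit directly. */
    static bool example_can_exec_compat(void)
    {
            return system_supports_32bit_el0();
    }
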
index f4dc81f..1b99046 100644
@@ -82,16 +82,16 @@ do {                                                \
 } while (0)
 
 
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr)         \
+#define alloc_zeroed_user_highpage_movable(vma, vaddr)                 \
 ({                                                                     \
        struct page *page = alloc_page_vma(                             \
-               GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr);  \
+               GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr);         \
        if (page)                                                       \
                flush_dcache_page(page);                                \
        page;                                                           \
 })
 
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
 
 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
index 8d0f862..c9d0d84 100644
@@ -13,9 +13,9 @@ extern unsigned long memory_end;
 #define clear_user_page(page, vaddr, pg)       clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
 
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
-       alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+       alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
 
 #define __pa(vaddr)            ((unsigned long)(vaddr))
 #define __va(paddr)            ((void *)((unsigned long)(paddr)))
index cc98f9b..479dc76 100644
@@ -68,9 +68,9 @@ static inline void copy_page(void *to, void *from)
 #define clear_user_page(page, vaddr, pg)       clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
 
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
-       alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+       alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
 
 /*
  * These are used to make use of C type-checking..
index 7555b48..4d5810c 100644
@@ -34,9 +34,9 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
        copy_page(to, from);
 }
 
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
-       alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+       alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
 
 #ifndef __pa
 #define __pa(x)                __phys_addr((unsigned long)(x))
index 3c1c5da..e3da38e 100644
@@ -335,10 +335,15 @@ int psci_cpu_suspend_enter(u32 state)
 {
        int ret;
 
-       if (!psci_power_state_loses_context(state))
+       if (!psci_power_state_loses_context(state)) {
+               struct arm_cpuidle_irq_context context;
+
+               arm_cpuidle_save_irq_context(&context);
                ret = psci_ops.cpu_suspend(state, 0);
-       else
+               arm_cpuidle_restore_irq_context(&context);
+       } else {
                ret = cpu_suspend(state, psci_suspend_finisher);
+       }
 
        return ret;
 }
index 028f81d..9f937b1 100644
@@ -15,6 +15,7 @@ static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
 static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE;
 
 bool __ro_after_init smccc_trng_available = false;
+u64 __ro_after_init smccc_has_sve_hint = false;
 
 void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
 {
@@ -22,6 +23,9 @@ void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
        smccc_conduit = conduit;
 
        smccc_trng_available = smccc_probe_trng();
+       if (IS_ENABLED(CONFIG_ARM64_SVE) &&
+           smccc_version >= ARM_SMCCC_VERSION_1_3)
+               smccc_has_sve_hint = true;
 }
 
 enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
index 0e8254d..a164896 100644
@@ -463,7 +463,7 @@ void lkdtm_DOUBLE_FAULT(void)
 #ifdef CONFIG_ARM64
 static noinline void change_pac_parameters(void)
 {
-       if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH)) {
+       if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
                /* Reset the keys of current task */
                ptrauth_thread_init_kernel(current);
                ptrauth_thread_switch_kernel(current);
@@ -477,8 +477,8 @@ noinline void lkdtm_CORRUPT_PAC(void)
 #define CORRUPT_PAC_ITERATE    10
        int i;
 
-       if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
-               pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH\n");
+       if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
+               pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH_KERNEL\n");
 
        if (!system_supports_address_auth()) {
                pr_err("FAIL: CPU lacks pointer authentication feature\n");
index 666d8a9..54aca3a 100644
@@ -37,7 +37,7 @@
 
 #define CCI_PMU_CNTR_SIZE(model)       ((model)->cntr_size)
 #define CCI_PMU_CNTR_BASE(model, idx)  ((idx) * CCI_PMU_CNTR_SIZE(model))
-#define CCI_PMU_CNTR_MASK              ((1ULL << 32) -1)
+#define CCI_PMU_CNTR_MASK              ((1ULL << 32) - 1)
 #define CCI_PMU_CNTR_LAST(cci_pmu)     (cci_pmu->num_cntrs - 1)
 
 #define CCI_PMU_MAX_HW_CNTRS(model) \
@@ -806,7 +806,7 @@ static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *ev
                return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);
 
        /* Generic code to find an unused idx from the mask */
-       for(idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
+       for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
                if (!test_and_set_bit(idx, hw->used_mask))
                        return idx;
 
index 96d47cb..a96c316 100644
@@ -1211,7 +1211,7 @@ static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
        perf_pmu_migrate_context(&dt->pmu, cpu, target);
        dt->cpu = target;
        if (ccn->irq)
-               WARN_ON(irq_set_affinity_hint(ccn->irq, cpumask_of(dt->cpu)));
+               WARN_ON(irq_set_affinity(ccn->irq, cpumask_of(dt->cpu)));
        return 0;
 }
 
@@ -1291,7 +1291,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
 
        /* Also make sure that the overflow interrupt is handled by this CPU */
        if (ccn->irq) {
-               err = irq_set_affinity_hint(ccn->irq, cpumask_of(ccn->dt.cpu));
+               err = irq_set_affinity(ccn->irq, cpumask_of(ccn->dt.cpu));
                if (err) {
                        dev_err(ccn->dev, "Failed to set interrupt affinity!\n");
                        goto error_set_affinity;
@@ -1325,8 +1325,6 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
 
        cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
                                            &ccn->dt.node);
-       if (ccn->irq)
-               irq_set_affinity_hint(ccn->irq, NULL);
        for (i = 0; i < ccn->num_xps; i++)
                writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
        writel(0, ccn->dt.base + CCN_DT_PMCR);
index 56a5c35..bc3cba5 100644
@@ -31,7 +31,7 @@
 #define CMN_CI_CHILD_COUNT             GENMASK_ULL(15, 0)
 #define CMN_CI_CHILD_PTR_OFFSET                GENMASK_ULL(31, 16)
 
-#define CMN_CHILD_NODE_ADDR            GENMASK(27,0)
+#define CMN_CHILD_NODE_ADDR            GENMASK(27, 0)
 #define CMN_CHILD_NODE_EXTERNAL                BIT(31)
 
 #define CMN_ADDR_NODE_PTR              GENMASK(27, 14)
@@ -1162,7 +1162,7 @@ static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
 
        perf_pmu_migrate_context(&cmn->pmu, cpu, target);
        for (i = 0; i < cmn->num_dtcs; i++)
-               irq_set_affinity_hint(cmn->dtc[i].irq, cpumask_of(target));
+               irq_set_affinity(cmn->dtc[i].irq, cpumask_of(target));
        cmn->cpu = target;
        return 0;
 }
@@ -1212,7 +1212,7 @@ static int arm_cmn_init_irqs(struct arm_cmn *cmn)
                irq = cmn->dtc[i].irq;
                for (j = i; j--; ) {
                        if (cmn->dtc[j].irq == irq) {
-                               cmn->dtc[j].irq_friend = j - i;
+                               cmn->dtc[j].irq_friend = i - j;
                                goto next;
                        }
                }
@@ -1222,7 +1222,7 @@ static int arm_cmn_init_irqs(struct arm_cmn *cmn)
                if (err)
                        return err;
 
-               err = irq_set_affinity_hint(irq, cpumask_of(cmn->cpu));
+               err = irq_set_affinity(irq, cpumask_of(cmn->cpu));
                if (err)
                        return err;
        next:
@@ -1568,16 +1568,11 @@ static int arm_cmn_probe(struct platform_device *pdev)
 static int arm_cmn_remove(struct platform_device *pdev)
 {
        struct arm_cmn *cmn = platform_get_drvdata(pdev);
-       int i;
 
        writel_relaxed(0, cmn->dtc[0].base + CMN_DT_DTC_CTL);
 
        perf_pmu_unregister(&cmn->pmu);
        cpuhp_state_remove_instance(arm_cmn_hp_state, &cmn->cpuhp_node);
-
-       for (i = 0; i < cmn->num_dtcs; i++)
-               irq_set_affinity_hint(cmn->dtc[i].irq, NULL);
-
        return 0;
 }
 
index b6c2511..280a6ae 100644
@@ -421,7 +421,7 @@ static struct dmc620_pmu_irq *__dmc620_pmu_get_irq(int irq_num)
        if (ret)
                goto out_free_aff;
 
-       ret = irq_set_affinity_hint(irq_num, cpumask_of(irq->cpu));
+       ret = irq_set_affinity(irq_num, cpumask_of(irq->cpu));
        if (ret)
                goto out_free_irq;
 
@@ -475,7 +475,6 @@ static void dmc620_pmu_put_irq(struct dmc620_pmu *dmc620_pmu)
        list_del(&irq->irqs_node);
        mutex_unlock(&dmc620_pmu_irqs_lock);
 
-       WARN_ON(irq_set_affinity_hint(irq->irq_num, NULL));
        free_irq(irq->irq_num, irq);
        cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &irq->node);
        kfree(irq);
@@ -622,7 +621,7 @@ static int dmc620_pmu_cpu_teardown(unsigned int cpu,
                perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);
        mutex_unlock(&dmc620_pmu_irqs_lock);
 
-       WARN_ON(irq_set_affinity_hint(irq->irq_num, cpumask_of(target)));
+       WARN_ON(irq_set_affinity(irq->irq_num, cpumask_of(target)));
        irq->cpu = target;
 
        return 0;
index 196faea..a36698a 100644
@@ -687,7 +687,7 @@ static void dsu_pmu_probe_pmu(struct dsu_pmu *dsu_pmu)
 static void dsu_pmu_set_active_cpu(int cpu, struct dsu_pmu *dsu_pmu)
 {
        cpumask_set_cpu(cpu, &dsu_pmu->active_cpu);
-       if (irq_set_affinity_hint(dsu_pmu->irq, &dsu_pmu->active_cpu))
+       if (irq_set_affinity(dsu_pmu->irq, &dsu_pmu->active_cpu))
                pr_warn("Failed to set irq affinity to %d\n", cpu);
 }
 
@@ -769,7 +769,6 @@ static int dsu_pmu_device_probe(struct platform_device *pdev)
        if (rc) {
                cpuhp_state_remove_instance(dsu_pmu_cpuhp_state,
                                                 &dsu_pmu->cpuhp_node);
-               irq_set_affinity_hint(dsu_pmu->irq, NULL);
        }
 
        return rc;
@@ -781,7 +780,6 @@ static int dsu_pmu_device_remove(struct platform_device *pdev)
 
        perf_pmu_unregister(&dsu_pmu->pmu);
        cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, &dsu_pmu->cpuhp_node);
-       irq_set_affinity_hint(dsu_pmu->irq, NULL);
 
        return 0;
 }
@@ -840,10 +838,8 @@ static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
 
        dst = dsu_pmu_get_online_cpu_any_but(dsu_pmu, cpu);
        /* If there are no active CPUs in the DSU, leave IRQ disabled */
-       if (dst >= nr_cpu_ids) {
-               irq_set_affinity_hint(dsu_pmu->irq, NULL);
+       if (dst >= nr_cpu_ids)
                return 0;
-       }
 
        perf_pmu_migrate_context(&dsu_pmu->pmu, cpu, dst);
        dsu_pmu_set_active_cpu(dst, dsu_pmu);
index d4f7f1f..3cbc3ba 100644
@@ -563,14 +563,14 @@ static int armpmu_filter_match(struct perf_event *event)
        return ret;
 }
 
-static ssize_t armpmu_cpumask_show(struct device *dev,
-                                  struct device_attribute *attr, char *buf)
+static ssize_t cpus_show(struct device *dev,
+                        struct device_attribute *attr, char *buf)
 {
        struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
        return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
 }
 
-static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);
+static DEVICE_ATTR_RO(cpus);
 
 static struct attribute *armpmu_common_attrs[] = {
        &dev_attr_cpus.attr,
@@ -644,11 +644,9 @@ int armpmu_request_irq(int irq, int cpu)
                }
 
                irq_flags = IRQF_PERCPU |
-                           IRQF_NOBALANCING |
+                           IRQF_NOBALANCING | IRQF_NO_AUTOEN |
                            IRQF_NO_THREAD;
 
-               irq_set_status_flags(irq, IRQ_NOAUTOEN);
-
                err = request_nmi(irq, handler, irq_flags, "arm-pmu",
                                  per_cpu_ptr(&cpu_armpmu, cpu));
 
@@ -670,7 +668,7 @@ int armpmu_request_irq(int irq, int cpu)
                                                 &cpu_armpmu);
                        irq_ops = &percpu_pmuirq_ops;
                } else {
-                       has_nmi= true;
+                       has_nmi = true;
                        irq_ops = &percpu_pmunmi_ops;
                }
        } else {
@@ -869,10 +867,8 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags)
        int cpu;
 
        pmu = kzalloc(sizeof(*pmu), flags);
-       if (!pmu) {
-               pr_info("failed to allocate PMU device!\n");
+       if (!pmu)
                goto out;
-       }
 
        pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
        if (!pmu->hw_events) {
index ff6fab4..2263488 100644
@@ -277,7 +277,7 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
                                       struct perf_event *event, int idx)
 {
        u32 span, sid;
-       unsigned int num_ctrs = smmu_pmu->num_counters;
+       unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters;
        bool filter_en = !!get_filter_enable(event);
 
        span = filter_en ? get_filter_span(event) :
@@ -285,17 +285,19 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
        sid = filter_en ? get_filter_stream_id(event) :
                           SMMU_PMCG_DEFAULT_FILTER_SID;
 
-       /* Support individual filter settings */
-       if (!smmu_pmu->global_filter) {
+       cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
+       /*
+        * Per-counter filtering, or scheduling the first globally-filtered
+        * event into an empty PMU so idx == 0 and it works out equivalent.
+        */
+       if (!smmu_pmu->global_filter || cur_idx == num_ctrs) {
                smmu_pmu_set_event_filter(event, idx, span, sid);
                return 0;
        }
 
-       /* Requested settings same as current global settings*/
-       idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
-       if (idx == num_ctrs ||
-           smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) {
-               smmu_pmu_set_event_filter(event, 0, span, sid);
+       /* Otherwise, must match whatever's currently scheduled */
+       if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) {
+               smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event));
                return 0;
        }
 
@@ -509,11 +511,8 @@ static ssize_t smmu_pmu_event_show(struct device *dev,
        return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
 }
 
-#define SMMU_EVENT_ATTR(name, config)                                  \
-       (&((struct perf_pmu_events_attr) {                              \
-               .attr = __ATTR(name, 0444, smmu_pmu_event_show, NULL),  \
-               .id = config,                                           \
-       }).attr.attr)
+#define SMMU_EVENT_ATTR(name, config)                  \
+       PMU_EVENT_ATTR_ID(name, smmu_pmu_event_show, config)
 
 static struct attribute *smmu_pmu_events[] = {
        SMMU_EVENT_ATTR(cycles, 0),
@@ -628,7 +627,7 @@ static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
 
        perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
        smmu_pmu->on_cpu = target;
-       WARN_ON(irq_set_affinity_hint(smmu_pmu->irq, cpumask_of(target)));
+       WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target)));
 
        return 0;
 }
@@ -839,15 +838,14 @@ static int smmu_pmu_probe(struct platform_device *pdev)
 
        /* Pick one CPU to be the preferred one to use */
        smmu_pmu->on_cpu = raw_smp_processor_id();
-       WARN_ON(irq_set_affinity_hint(smmu_pmu->irq,
-                                     cpumask_of(smmu_pmu->on_cpu)));
+       WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));
 
        err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
                                               &smmu_pmu->node);
        if (err) {
                dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
                        err, &res_0->start);
-               goto out_clear_affinity;
+               return err;
        }
 
        err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
@@ -866,8 +864,6 @@ static int smmu_pmu_probe(struct platform_device *pdev)
 
 out_unregister:
        cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
-out_clear_affinity:
-       irq_set_affinity_hint(smmu_pmu->irq, NULL);
        return err;
 }
 
@@ -877,7 +873,6 @@ static int smmu_pmu_remove(struct platform_device *pdev)
 
        perf_pmu_unregister(&smmu_pmu->pmu);
        cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
-       irq_set_affinity_hint(smmu_pmu->irq, NULL);
 
        return 0;
 }
index 8a1e86a..d44bcc2 100644
@@ -231,15 +231,14 @@ static const struct attribute_group arm_spe_pmu_format_group = {
        .attrs  = arm_spe_pmu_formats_attr,
 };
 
-static ssize_t arm_spe_pmu_get_attr_cpumask(struct device *dev,
-                                           struct device_attribute *attr,
-                                           char *buf)
+static ssize_t cpumask_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
 {
        struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
 
        return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus);
 }
-static DEVICE_ATTR(cpumask, S_IRUGO, arm_spe_pmu_get_attr_cpumask, NULL);
+static DEVICE_ATTR_RO(cpumask);
 
 static struct attribute *arm_spe_pmu_attrs[] = {
        &dev_attr_cpumask.attr,
@@ -1044,7 +1043,6 @@ static void __arm_spe_pmu_dev_probe(void *info)
                 spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features);
 
        spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED;
-       return;
 }
 
 static void __arm_spe_pmu_reset_local(void)
@@ -1190,10 +1188,8 @@ static int arm_spe_pmu_device_probe(struct platform_device *pdev)
        }
 
        spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
-       if (!spe_pmu) {
-               dev_err(dev, "failed to allocate spe_pmu\n");
+       if (!spe_pmu)
                return -ENOMEM;
-       }
 
        spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle));
        if (!spe_pmu->handle)
index 2bbb931..94ebc1e 100644
@@ -222,11 +222,8 @@ ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
        return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
 }
 
-#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id)                            \
-       (&((struct perf_pmu_events_attr[]) {                            \
-               { .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
-                 .id = _id, }                                          \
-       })[0].attr.attr)
+#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id)            \
+       PMU_EVENT_ATTR_ID(_name, ddr_pmu_event_show, _id)
 
 static struct attribute *ddr_perf_events_attrs[] = {
        IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
@@ -674,7 +671,7 @@ static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
        perf_pmu_migrate_context(&pmu->pmu, cpu, target);
        pmu->cpu = target;
 
-       WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));
+       WARN_ON(irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu)));
 
        return 0;
 }
@@ -705,8 +702,10 @@ static int ddr_perf_probe(struct platform_device *pdev)
 
        name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
                              num);
-       if (!name)
-               return -ENOMEM;
+       if (!name) {
+               ret = -ENOMEM;
+               goto cpuhp_state_err;
+       }
 
        pmu->devtype_data = of_device_get_match_data(&pdev->dev);
 
@@ -749,7 +748,7 @@ static int ddr_perf_probe(struct platform_device *pdev)
        }
 
        pmu->irq = irq;
-       ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu));
+       ret = irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu));
        if (ret) {
                dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
                goto ddr_perf_err;
@@ -777,7 +776,6 @@ static int ddr_perf_remove(struct platform_device *pdev)
 
        cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
        cpuhp_remove_multi_state(pmu->cpuhp_state);
-       irq_set_affinity_hint(pmu->irq, NULL);
 
        perf_pmu_unregister(&pmu->pmu);
 
index 7c8a4bc..62299ab 100644
@@ -2,7 +2,7 @@
 /*
  * HiSilicon SoC DDRC uncore Hardware event counters support
  *
- * Copyright (C) 2017 Hisilicon Limited
+ * Copyright (C) 2017 HiSilicon Limited
  * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
  *         Anurup M <anurup.m@huawei.com>
  *
@@ -537,7 +537,6 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
                dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n");
                cpuhp_state_remove_instance_nocalls(
                        CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, &ddrc_pmu->node);
-               irq_set_affinity_hint(ddrc_pmu->irq, NULL);
        }
 
        return ret;
@@ -550,8 +549,6 @@ static int hisi_ddrc_pmu_remove(struct platform_device *pdev)
        perf_pmu_unregister(&ddrc_pmu->pmu);
        cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
                                            &ddrc_pmu->node);
-       irq_set_affinity_hint(ddrc_pmu->irq, NULL);
-
        return 0;
 }
 
index 0316fab..3935131 100644
@@ -2,7 +2,7 @@
 /*
  * HiSilicon SoC HHA uncore Hardware event counters support
  *
- * Copyright (C) 2017 Hisilicon Limited
+ * Copyright (C) 2017 HiSilicon Limited
  * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
  *         Anurup M <anurup.m@huawei.com>
  *
@@ -90,7 +90,7 @@ static void hisi_hha_pmu_config_ds(struct perf_event *event)
 
                val = readl(hha_pmu->base + HHA_DATSRC_CTRL);
                val |= HHA_DATSRC_SKT_EN;
-               writel(ds_skt, hha_pmu->base + HHA_DATSRC_CTRL);
+               writel(val, hha_pmu->base + HHA_DATSRC_CTRL);
        }
 }
 
@@ -104,7 +104,7 @@ static void hisi_hha_pmu_clear_ds(struct perf_event *event)
 
                val = readl(hha_pmu->base + HHA_DATSRC_CTRL);
                val &= ~HHA_DATSRC_SKT_EN;
-               writel(ds_skt, hha_pmu->base + HHA_DATSRC_CTRL);
+               writel(val, hha_pmu->base + HHA_DATSRC_CTRL);
        }
 }
 
@@ -540,7 +540,6 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
                dev_err(hha_pmu->dev, "HHA PMU register failed!\n");
                cpuhp_state_remove_instance_nocalls(
                        CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, &hha_pmu->node);
-               irq_set_affinity_hint(hha_pmu->irq, NULL);
        }
 
        return ret;
@@ -553,8 +552,6 @@ static int hisi_hha_pmu_remove(struct platform_device *pdev)
        perf_pmu_unregister(&hha_pmu->pmu);
        cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
                                            &hha_pmu->node);
-       irq_set_affinity_hint(hha_pmu->irq, NULL);
-
        return 0;
 }
 
index bf9f777..560ab96 100644
@@ -2,7 +2,7 @@
 /*
  * HiSilicon SoC L3C uncore Hardware event counters support
  *
- * Copyright (C) 2017 Hisilicon Limited
+ * Copyright (C) 2017 HiSilicon Limited
  * Author: Anurup M <anurup.m@huawei.com>
  *         Shaokun Zhang <zhangshaokun@hisilicon.com>
  *
@@ -578,7 +578,6 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
                dev_err(l3c_pmu->dev, "L3C PMU register failed!\n");
                cpuhp_state_remove_instance_nocalls(
                        CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, &l3c_pmu->node);
-               irq_set_affinity_hint(l3c_pmu->irq, NULL);
        }
 
        return ret;
@@ -591,8 +590,6 @@ static int hisi_l3c_pmu_remove(struct platform_device *pdev)
        perf_pmu_unregister(&l3c_pmu->pmu);
        cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
                                            &l3c_pmu->node);
-       irq_set_affinity_hint(l3c_pmu->irq, NULL);
-
        return 0;
 }
 
index 14f23eb..83264ec 100644
@@ -333,7 +333,7 @@ static struct attribute *hisi_pa_pmu_identifier_attrs[] = {
        NULL
 };
 
-static struct attribute_group hisi_pa_pmu_identifier_group = {
+static const struct attribute_group hisi_pa_pmu_identifier_group = {
        .attrs = hisi_pa_pmu_identifier_attrs,
 };
 
@@ -436,7 +436,6 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
                dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
                cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
                                            &pa_pmu->node);
-               irq_set_affinity_hint(pa_pmu->irq, NULL);
                return ret;
        }
 
@@ -451,8 +450,6 @@ static int hisi_pa_pmu_remove(struct platform_device *pdev)
        perf_pmu_unregister(&pa_pmu->pmu);
        cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
                                            &pa_pmu->node);
-       irq_set_affinity_hint(pa_pmu->irq, NULL);
-
        return 0;
 }
 
index 13c68b5..a738aea 100644
@@ -2,7 +2,7 @@
 /*
  * HiSilicon SoC Hardware event counters support
  *
- * Copyright (C) 2017 Hisilicon Limited
+ * Copyright (C) 2017 HiSilicon Limited
  * Author: Anurup M <anurup.m@huawei.com>
  *         Shaokun Zhang <zhangshaokun@hisilicon.com>
  *
@@ -488,7 +488,7 @@ int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
        hisi_pmu->on_cpu = cpu;
 
        /* Overflow interrupt also should use the same CPU */
-       WARN_ON(irq_set_affinity_hint(hisi_pmu->irq, cpumask_of(cpu)));
+       WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu)));
 
        return 0;
 }
@@ -521,7 +521,7 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
        perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target);
        /* Use this CPU for event counting */
        hisi_pmu->on_cpu = target;
-       WARN_ON(irq_set_affinity_hint(hisi_pmu->irq, cpumask_of(target)));
+       WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(target)));
 
        return 0;
 }
index ea9d89b..7f5841d 100644
@@ -2,7 +2,7 @@
 /*
  * HiSilicon SoC Hardware event counters support
  *
- * Copyright (C) 2017 Hisilicon Limited
+ * Copyright (C) 2017 HiSilicon Limited
  * Author: Anurup M <anurup.m@huawei.com>
  *         Shaokun Zhang <zhangshaokun@hisilicon.com>
  *
index 46be312..6aedc30 100644
@@ -2,7 +2,7 @@
 /*
  * HiSilicon SLLC uncore Hardware event counters support
  *
- * Copyright (C) 2020 Hisilicon Limited
+ * Copyright (C) 2020 HiSilicon Limited
  * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
  *
  * This code is based on the uncore PMUs like arm-cci and arm-ccn.
@@ -366,7 +366,7 @@ static struct attribute *hisi_sllc_pmu_identifier_attrs[] = {
        NULL
 };
 
-static struct attribute_group hisi_sllc_pmu_identifier_group = {
+static const struct attribute_group hisi_sllc_pmu_identifier_group = {
        .attrs = hisi_sllc_pmu_identifier_attrs,
 };
 
@@ -465,7 +465,6 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
                dev_err(sllc_pmu->dev, "PMU register failed, ret = %d\n", ret);
                cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
                                            &sllc_pmu->node);
-               irq_set_affinity_hint(sllc_pmu->irq, NULL);
                return ret;
        }
 
@@ -481,8 +480,6 @@ static int hisi_sllc_pmu_remove(struct platform_device *pdev)
        perf_pmu_unregister(&sllc_pmu->pmu);
        cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
                                            &sllc_pmu->node);
-       irq_set_affinity_hint(sllc_pmu->irq, NULL);
-
        return 0;
 }
 
index fc54a80..5b093ba 100644
@@ -679,11 +679,8 @@ static ssize_t l2cache_pmu_event_show(struct device *dev,
        return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
 }
 
-#define L2CACHE_EVENT_ATTR(_name, _id)                                      \
-       (&((struct perf_pmu_events_attr[]) {                                 \
-               { .attr = __ATTR(_name, 0444, l2cache_pmu_event_show, NULL), \
-                 .id = _id, }                                               \
-       })[0].attr.attr)
+#define L2CACHE_EVENT_ATTR(_name, _id)                     \
+       PMU_EVENT_ATTR_ID(_name, l2cache_pmu_event_show, _id)
 
 static struct attribute *l2_cache_pmu_events[] = {
        L2CACHE_EVENT_ATTR(cycles, L2_EVENT_CYCLES),
@@ -869,14 +866,14 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
        irq = platform_get_irq(sdev, 0);
        if (irq < 0)
                return irq;
-       irq_set_status_flags(irq, IRQ_NOAUTOEN);
        cluster->irq = irq;
 
        cluster->l2cache_pmu = l2cache_pmu;
        cluster->on_cpu = -1;
 
        err = devm_request_irq(&pdev->dev, irq, l2_cache_handle_irq,
-                              IRQF_NOBALANCING | IRQF_NO_THREAD,
+                              IRQF_NOBALANCING | IRQF_NO_THREAD |
+                              IRQF_NO_AUTOEN,
                               "l2-cache-pmu", cluster);
        if (err) {
                dev_err(&pdev->dev,
index bba0780..1ff2ff6 100644
@@ -647,10 +647,7 @@ static ssize_t l3cache_pmu_event_show(struct device *dev,
 }
 
 #define L3CACHE_EVENT_ATTR(_name, _id)                                      \
-       (&((struct perf_pmu_events_attr[]) {                                 \
-               { .attr = __ATTR(_name, 0444, l3cache_pmu_event_show, NULL), \
-                 .id = _id, }                                               \
-       })[0].attr.attr)
+       PMU_EVENT_ATTR_ID(_name, l3cache_pmu_event_show, _id)
 
 static struct attribute *qcom_l3_cache_pmu_events[] = {
        L3CACHE_EVENT_ATTR(cycles, L3_EVENT_CYCLES),
@@ -670,15 +667,15 @@ static const struct attribute_group qcom_l3_cache_pmu_events_group = {
 
 /* cpumask */
 
-static ssize_t qcom_l3_cache_pmu_cpumask_show(struct device *dev,
-                                    struct device_attribute *attr, char *buf)
+static ssize_t cpumask_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
 {
        struct l3cache_pmu *l3pmu = to_l3cache_pmu(dev_get_drvdata(dev));
 
        return cpumap_print_to_pagebuf(true, buf, &l3pmu->cpumask);
 }
 
-static DEVICE_ATTR(cpumask, 0444, qcom_l3_cache_pmu_cpumask_show, NULL);
+static DEVICE_ATTR_RO(cpumask);
 
 static struct attribute *qcom_l3_cache_pmu_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
@@ -767,10 +764,8 @@ static int qcom_l3_cache_pmu_probe(struct platform_device *pdev)
 
        memrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        l3pmu->regs = devm_ioremap_resource(&pdev->dev, memrc);
-       if (IS_ERR(l3pmu->regs)) {
-               dev_err(&pdev->dev, "Can't map PMU @%pa\n", &memrc->start);
+       if (IS_ERR(l3pmu->regs))
                return PTR_ERR(l3pmu->regs);
-       }
 
        qcom_l3_cache__init(l3pmu);
 
index 06a6d56..fc1a376 100644
@@ -817,10 +817,8 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
        }
 
        base = devm_ioremap_resource(dev, &res);
-       if (IS_ERR(base)) {
-               dev_err(dev, "PMU type %d: Fail to map resource\n", type);
+       if (IS_ERR(base))
                return NULL;
-       }
 
        tx2_pmu = devm_kzalloc(dev, sizeof(*tx2_pmu), GFP_KERNEL);
        if (!tx2_pmu)
index ffe3bde..2b6d476 100644
@@ -278,17 +278,14 @@ static const struct attribute_group mc_pmu_v3_format_attr_group = {
 static ssize_t xgene_pmu_event_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
 {
-       struct dev_ext_attribute *eattr;
+       struct perf_pmu_events_attr *pmu_attr =
+               container_of(attr, struct perf_pmu_events_attr, attr);
 
-       eattr = container_of(attr, struct dev_ext_attribute, attr);
-       return sysfs_emit(buf, "config=0x%lx\n", (unsigned long) eattr->var);
+       return sysfs_emit(buf, "config=0x%llx\n", pmu_attr->id);
 }
 
 #define XGENE_PMU_EVENT_ATTR(_name, _config)           \
-       (&((struct dev_ext_attribute[]) {               \
-               { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_event_show, NULL), \
-                 .var = (void *) _config, }            \
-        })[0].attr.attr)
+       PMU_EVENT_ATTR_ID(_name, xgene_pmu_event_show, _config)
 
 static struct attribute *l3c_pmu_events_attrs[] = {
        XGENE_PMU_EVENT_ATTR(cycle-count,                       0x00),
@@ -604,15 +601,15 @@ static const struct attribute_group mc_pmu_v3_events_attr_group = {
 /*
  * sysfs cpumask attributes
  */
-static ssize_t xgene_pmu_cpumask_show(struct device *dev,
-                                     struct device_attribute *attr, char *buf)
+static ssize_t cpumask_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
 {
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(dev_get_drvdata(dev));
 
        return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu);
 }
 
-static DEVICE_ATTR(cpumask, S_IRUGO, xgene_pmu_cpumask_show, NULL);
+static DEVICE_ATTR_RO(cpumask);
 
 static struct attribute *xgene_pmu_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
index 6861489..7d1cabe 100644
@@ -63,6 +63,9 @@
 #define ARM_SMCCC_VERSION_1_0          0x10000
 #define ARM_SMCCC_VERSION_1_1          0x10001
 #define ARM_SMCCC_VERSION_1_2          0x10002
+#define ARM_SMCCC_VERSION_1_3          0x10003
+
+#define ARM_SMCCC_1_3_SVE_HINT         0x10000
 
 #define ARM_SMCCC_VERSION_FUNC_ID                                      \
        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
@@ -216,6 +219,8 @@ u32 arm_smccc_get_version(void);
 
 void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit);
 
+extern u64 smccc_has_sve_hint;
+
 /**
  * struct arm_smccc_res - Result from SMC/HVC call
  * @a0-a3 result values from registers 0 to 3
@@ -227,6 +232,61 @@ struct arm_smccc_res {
        unsigned long a3;
 };
 
+#ifdef CONFIG_ARM64
+/**
+ * struct arm_smccc_1_2_regs - Arguments for or Results from SMC/HVC call
+ * @a0-a17 argument values from registers 0 to 17
+ */
+struct arm_smccc_1_2_regs {
+       unsigned long a0;
+       unsigned long a1;
+       unsigned long a2;
+       unsigned long a3;
+       unsigned long a4;
+       unsigned long a5;
+       unsigned long a6;
+       unsigned long a7;
+       unsigned long a8;
+       unsigned long a9;
+       unsigned long a10;
+       unsigned long a11;
+       unsigned long a12;
+       unsigned long a13;
+       unsigned long a14;
+       unsigned long a15;
+       unsigned long a16;
+       unsigned long a17;
+};
+
+/**
+ * arm_smccc_1_2_hvc() - make HVC calls
+ * @args: arguments passed via struct arm_smccc_1_2_regs
+ * @res: result values via struct arm_smccc_1_2_regs
+ *
+ * This function is used to make HVC calls following SMC Calling Convention
+ * v1.2 or above. The content of the supplied param are copied from the
+ * structure to registers prior to the HVC instruction. The return values
+ * are updated with the content from registers on return from the HVC
+ * instruction.
+ */
+asmlinkage void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args,
+                                 struct arm_smccc_1_2_regs *res);
+
+/**
+ * arm_smccc_1_2_smc() - make SMC calls
+ * @args: arguments passed via struct arm_smccc_1_2_regs
+ * @res: result values via struct arm_smccc_1_2_regs
+ *
+ * This function is used to make SMC calls following SMC Calling Convention
+ * v1.2 or above. The content of the supplied param are copied from the
+ * structure to registers prior to the SMC instruction. The return values
+ * are updated with the content from registers on return from the SMC
+ * instruction.
+ */
+asmlinkage void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args,
+                                 struct arm_smccc_1_2_regs *res);
+#endif
+
 /**
  * struct arm_smccc_quirk - Contains quirk information
  * @id: quirk identification
@@ -241,6 +301,15 @@ struct arm_smccc_quirk {
 };
 
 /**
+ * __arm_smccc_sve_check() - Set the SVE hint bit when doing SMC calls
+ *
+ * Sets the SMCCC hint bit to indicate if there is live state in the SVE
+ * registers, this modifies x0 in place and should never be called from C
+ * code.
+ */
+asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0);
+
+/**
  * __arm_smccc_smc() - make SMC calls
  * @a0-a7: arguments passed in registers 0 to 7
  * @res: result values from registers 0 to 3
@@ -297,6 +366,20 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
 
 #endif
 
+/* nVHE hypervisor doesn't have a current thread so needs separate checks */
+#if defined(CONFIG_ARM64_SVE) && !defined(__KVM_NVHE_HYPERVISOR__)
+
+#define SMCCC_SVE_CHECK ALTERNATIVE("nop \n",  "bl __arm_smccc_sve_check \n", \
+                                   ARM64_SVE)
+#define smccc_sve_clobbers "x16", "x30", "cc",
+
+#else
+
+#define SMCCC_SVE_CHECK
+#define smccc_sve_clobbers
+
+#endif
+
 #define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
 
 #define __count_args(...)                                              \
@@ -364,7 +447,7 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
 
 #define ___constraints(count)                                          \
        : __constraint_read_ ## count                                   \
-       : "memory"
+       : smccc_sve_clobbers "memory"
 #define __constraints(count)   ___constraints(count)
 
 /*
@@ -379,7 +462,8 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
                register unsigned long r2 asm("r2");                    \
                register unsigned long r3 asm("r3");                    \
                __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
-               asm volatile(inst "\n" :                                \
+               asm volatile(SMCCC_SVE_CHECK                            \
+                            inst "\n" :                                \
                             "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \
                             __constraints(__count_args(__VA_ARGS__))); \
                if (___res)                                             \
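
Not part of the patch: a minimal usage sketch for the SMCCC v1.2 interface declared above. The function ID is the SMCCC version query so the example stays self-contained; everything else follows the kernel-doc for arm_smccc_1_2_smc().

    #include <linux/arm-smccc.h>

    /* Issue an SMCCC v1.2 fast call and return whatever comes back in a0. */
    static unsigned long example_smccc_1_2_call(void)
    {
            struct arm_smccc_1_2_regs args = {
                    .a0 = ARM_SMCCC_VERSION_FUNC_ID,
            };
            struct arm_smccc_1_2_regs res;

            arm_smccc_1_2_smc(&args, &res);
            return res.a0;  /* results are returned in a0..a17 */
    }
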
index 11da8af..e6102df 100644
@@ -53,8 +53,10 @@ struct vm_area_struct;
 #define ___GFP_HARDWALL                0x100000u
 #define ___GFP_THISNODE                0x200000u
 #define ___GFP_ACCOUNT         0x400000u
+#define ___GFP_ZEROTAGS                0x800000u
+#define ___GFP_SKIP_KASAN_POISON       0x1000000u
 #ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP       0x800000u
+#define ___GFP_NOLOCKDEP       0x2000000u
 #else
 #define ___GFP_NOLOCKDEP       0
 #endif
@@ -229,16 +231,25 @@ struct vm_area_struct;
  * %__GFP_COMP address compound page metadata.
  *
  * %__GFP_ZERO returns a zeroed page on success.
+ *
+ * %__GFP_ZEROTAGS returns a page with zeroed memory tags on success, if
+ * __GFP_ZERO is set.
+ *
+ * %__GFP_SKIP_KASAN_POISON returns a page which does not need to be poisoned
+ * on deallocation. Typically used for userspace pages. Currently only has an
+ * effect in HW tags mode.
  */
 #define __GFP_NOWARN   ((__force gfp_t)___GFP_NOWARN)
 #define __GFP_COMP     ((__force gfp_t)___GFP_COMP)
 #define __GFP_ZERO     ((__force gfp_t)___GFP_ZERO)
+#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS)
+#define __GFP_SKIP_KASAN_POISON        ((__force gfp_t)___GFP_SKIP_KASAN_POISON)
 
 /* Disable lockdep for GFP context tracking */
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP))
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /**
@@ -319,7 +330,8 @@ struct vm_area_struct;
 #define GFP_DMA                __GFP_DMA
 #define GFP_DMA32      __GFP_DMA32
 #define GFP_HIGHUSER   (GFP_USER | __GFP_HIGHMEM)
-#define GFP_HIGHUSER_MOVABLE   (GFP_HIGHUSER | __GFP_MOVABLE)
+#define GFP_HIGHUSER_MOVABLE   (GFP_HIGHUSER | __GFP_MOVABLE | \
+                        __GFP_SKIP_KASAN_POISON)
 #define GFP_TRANSHUGE_LIGHT    ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
                         __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
 #define GFP_TRANSHUGE  (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
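
Not part of the patch: a hedged sketch of how the two new GFP bits compose. The helper name is invented; note that GFP_HIGHUSER_MOVABLE now carries __GFP_SKIP_KASAN_POISON implicitly, so only the tag-zeroing bit needs to be added explicitly, and __GFP_ZEROTAGS only takes effect together with __GFP_ZERO.

    #include <linux/gfp.h>

    static struct page *example_alloc_tagged_user_page(void)
    {
            /* Zero the data and the MTE tags at allocation time; the
             * skip-KASAN-poison behaviour rides along with GFP_HIGHUSER_MOVABLE. */
            return alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_ZERO | __GFP_ZEROTAGS);
    }
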
index 832b49b..8c6e8e9 100644
@@ -152,28 +152,24 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
 }
 #endif
 
-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
 /**
- * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
- * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
+ * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
  * @vma: The VMA the page is to be allocated for
  * @vaddr: The virtual address the page will be inserted into
  *
- * This function will allocate a page for a VMA but the caller is expected
- * to specify via movableflags whether the page will be movable in the
- * future or not
+ * This function will allocate a page for a VMA that the caller knows will
+ * be able to migrate in the future using move_pages() or reclaimed
  *
  * An architecture may override this function by defining
- * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
+ * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
  * implementation.
  */
 static inline struct page *
-__alloc_zeroed_user_highpage(gfp_t movableflags,
-                       struct vm_area_struct *vma,
-                       unsigned long vaddr)
+alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+                                  unsigned long vaddr)
 {
-       struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
-                       vma, vaddr);
+       struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
 
        if (page)
                clear_user_highpage(page, vaddr);
@@ -182,21 +178,6 @@ __alloc_zeroed_user_highpage(gfp_t movableflags,
 }
 #endif
 
-/**
- * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
- *
- * This function will allocate a page for a VMA that the caller knows will
- * be able to migrate in the future using move_pages() or reclaimed
- */
-static inline struct page *
-alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
-                                       unsigned long vaddr)
-{
-       return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
-}
-
 static inline void clear_highpage(struct page *page)
 {
        void *kaddr = kmap_atomic(page);
@@ -204,6 +185,14 @@ static inline void clear_highpage(struct page *page)
        kunmap_atomic(kaddr);
 }
 
+#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
+
+static inline void tag_clear_highpage(struct page *page)
+{
+}
+
+#endif
+
 /*
  * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
  * If we pass in a head page, we can zero up to the size of the compound page.
index 4777850..35a3742 100644
@@ -319,39 +319,8 @@ struct irq_affinity_desc {
 
 extern cpumask_var_t irq_default_affinity;
 
-/* Internal implementation. Use the helpers below */
-extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
-                             bool force);
-
-/**
- * irq_set_affinity - Set the irq affinity of a given irq
- * @irq:       Interrupt to set affinity
- * @cpumask:   cpumask
- *
- * Fails if cpumask does not contain an online CPU
- */
-static inline int
-irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
-{
-       return __irq_set_affinity(irq, cpumask, false);
-}
-
-/**
- * irq_force_affinity - Force the irq affinity of a given irq
- * @irq:       Interrupt to set affinity
- * @cpumask:   cpumask
- *
- * Same as irq_set_affinity, but without checking the mask against
- * online cpus.
- *
- * Solely for low level cpu hotplug code, where we need to make per
- * cpu interrupts affine before the cpu becomes online.
- */
-static inline int
-irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
-{
-       return __irq_set_affinity(irq, cpumask, true);
-}
+extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
+extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
 
 extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
index b1678a6..a1c7ce5 100644
@@ -2,6 +2,7 @@
 #ifndef _LINUX_KASAN_H
 #define _LINUX_KASAN_H
 
+#include <linux/bug.h>
 #include <linux/static_key.h>
 #include <linux/types.h>
 
@@ -79,14 +80,6 @@ static inline void kasan_disable_current(void) {}
 
 #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 
-#ifdef CONFIG_KASAN
-
-struct kasan_cache {
-       int alloc_meta_offset;
-       int free_meta_offset;
-       bool is_kmalloc;
-};
-
 #ifdef CONFIG_KASAN_HW_TAGS
 
 DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
@@ -101,11 +94,14 @@ static inline bool kasan_has_integrated_init(void)
        return kasan_enabled();
 }
 
+void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
+void kasan_free_pages(struct page *page, unsigned int order);
+
 #else /* CONFIG_KASAN_HW_TAGS */
 
 static inline bool kasan_enabled(void)
 {
-       return true;
+       return IS_ENABLED(CONFIG_KASAN);
 }
 
 static inline bool kasan_has_integrated_init(void)
@@ -113,8 +109,30 @@ static inline bool kasan_has_integrated_init(void)
        return false;
 }
 
+static __always_inline void kasan_alloc_pages(struct page *page,
+                                             unsigned int order, gfp_t flags)
+{
+       /* Only available for integrated init. */
+       BUILD_BUG();
+}
+
+static __always_inline void kasan_free_pages(struct page *page,
+                                            unsigned int order)
+{
+       /* Only available for integrated init. */
+       BUILD_BUG();
+}
+
 #endif /* CONFIG_KASAN_HW_TAGS */
 
+#ifdef CONFIG_KASAN
+
+struct kasan_cache {
+       int alloc_meta_offset;
+       int free_meta_offset;
+       bool is_kmalloc;
+};
+
 slab_flags_t __kasan_never_merge(void);
 static __always_inline slab_flags_t kasan_never_merge(void)
 {
@@ -130,20 +148,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
                __kasan_unpoison_range(addr, size);
 }
 
-void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
-static __always_inline void kasan_alloc_pages(struct page *page,
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_poison_pages(struct page *page,
                                                unsigned int order, bool init)
 {
        if (kasan_enabled())
-               __kasan_alloc_pages(page, order, init);
+               __kasan_poison_pages(page, order, init);
 }
 
-void __kasan_free_pages(struct page *page, unsigned int order, bool init);
-static __always_inline void kasan_free_pages(struct page *page,
-                                               unsigned int order, bool init)
+void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_unpoison_pages(struct page *page,
+                                                unsigned int order, bool init)
 {
        if (kasan_enabled())
-               __kasan_free_pages(page, order, init);
+               __kasan_unpoison_pages(page, order, init);
 }
 
 void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
@@ -285,21 +303,15 @@ void kasan_restore_multi_shot(bool enabled);
 
 #else /* CONFIG_KASAN */
 
-static inline bool kasan_enabled(void)
-{
-       return false;
-}
-static inline bool kasan_has_integrated_init(void)
-{
-       return false;
-}
 static inline slab_flags_t kasan_never_merge(void)
 {
        return 0;
 }
 static inline void kasan_unpoison_range(const void *address, size_t size) {}
-static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
-static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
+static inline void kasan_poison_pages(struct page *page, unsigned int order,
+                                     bool init) {}
+static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
+                                       bool init) {}
 static inline void kasan_cache_create(struct kmem_cache *cache,
                                      unsigned int *size,
                                      slab_flags_t *flags) {}
index 04a34c0..40e2c50 100644
@@ -138,6 +138,9 @@ enum pageflags {
 #ifdef CONFIG_64BIT
        PG_arch_2,
 #endif
+#ifdef CONFIG_KASAN_HW_TAGS
+       PG_skip_kasan_poison,
+#endif
        __NR_PAGEFLAGS,
 
        /* Filesystems */
@@ -443,6 +446,12 @@ TESTCLEARFLAG(Young, young, PF_ANY)
 PAGEFLAG(Idle, idle, PF_ANY)
 #endif
 
+#ifdef CONFIG_KASAN_HW_TAGS
+PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
+#else
+PAGEFLAG_FALSE(SkipKASanPoison)
+#endif
+
 /*
  * PageReported() is used to track reported free pages within the Buddy
  * allocator. We can use the non-atomic version of the test and set
index f5a6a2f..2d510ad 100644
@@ -1576,6 +1576,12 @@ static struct perf_pmu_events_attr _var = {                                  \
        .event_str      = _str,                                             \
 };
 
+#define PMU_EVENT_ATTR_ID(_name, _show, _id)                           \
+       (&((struct perf_pmu_events_attr[]) {                            \
+               { .attr = __ATTR(_name, 0444, _show, NULL),             \
+                 .id = _id, }                                          \
+       })[0].attr.attr)
+
 #define PMU_FORMAT_ATTR(_name, _format)                                        \
 static ssize_t                                                         \
 _name##_show(struct device *dev,                                       \
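
Not part of the patch: a usage sketch for PMU_EVENT_ATTR_ID() mirroring the driver conversions earlier in this diff. The show routine follows the same perf_pmu_events_attr pattern those drivers use; the attribute names and event IDs are invented.

    #include <linux/device.h>
    #include <linux/perf_event.h>
    #include <linux/sysfs.h>

    static ssize_t example_pmu_event_show(struct device *dev,
                                          struct device_attribute *attr, char *page)
    {
            struct perf_pmu_events_attr *pmu_attr =
                    container_of(attr, struct perf_pmu_events_attr, attr);

            return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
    }

    static struct attribute *example_pmu_events[] = {
            PMU_EVENT_ATTR_ID(cycles,     example_pmu_event_show, 0x00),
            PMU_EVENT_ATTR_ID(cache_miss, example_pmu_event_show, 0x2a),
            NULL
    };
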
index 629c7a0..390270e 100644
 #define IF_HAVE_PG_ARCH_2(flag,string)
 #endif
 
+#ifdef CONFIG_KASAN_HW_TAGS
+#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string) ,{1UL << flag, string}
+#else
+#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string)
+#endif
+
 #define __def_pageflag_names                                           \
        {1UL << PG_locked,              "locked"        },              \
        {1UL << PG_waiters,             "waiters"       },              \
@@ -112,7 +118,8 @@ IF_HAVE_PG_UNCACHED(PG_uncached,    "uncached"      )               \
 IF_HAVE_PG_HWPOISON(PG_hwpoison,       "hwpoison"      )               \
 IF_HAVE_PG_IDLE(PG_young,              "young"         )               \
 IF_HAVE_PG_IDLE(PG_idle,               "idle"          )               \
-IF_HAVE_PG_ARCH_2(PG_arch_2,           "arch_2"        )
+IF_HAVE_PG_ARCH_2(PG_arch_2,           "arch_2"        )               \
+IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison")
 
 #define show_page_flags(flags)                                         \
        (flags) ? __print_flags(flags, "|",                             \
index 4c14356..a847dd2 100644
@@ -441,7 +441,8 @@ out_unlock:
        return ret;
 }
 
-int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
+static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
+                             bool force)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
@@ -456,6 +457,36 @@ int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
        return ret;
 }
 
+/**
+ * irq_set_affinity - Set the irq affinity of a given irq
+ * @irq:       Interrupt to set affinity
+ * @cpumask:   cpumask
+ *
+ * Fails if cpumask does not contain an online CPU
+ */
+int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+       return __irq_set_affinity(irq, cpumask, false);
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity);
+
+/**
+ * irq_force_affinity - Force the irq affinity of a given irq
+ * @irq:       Interrupt to set affinity
+ * @cpumask:   cpumask
+ *
+ * Same as irq_set_affinity, but without checking the mask against
+ * online cpus.
+ *
+ * Solely for low level cpu hotplug code, where we need to make per
+ * cpu interrupts affine before the cpu becomes online.
+ */
+int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+       return __irq_set_affinity(irq, cpumask, true);
+}
+EXPORT_SYMBOL_GPL(irq_force_affinity);
+
 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 {
        unsigned long flags;
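
Not part of the patch: with irq_set_affinity() exported above, the driver conversions throughout this series reduce to the pattern below, namely calling it directly and dropping the irq_set_affinity_hint(irq, NULL) teardown, since no hint is left behind. struct example_pmu and the function are invented for illustration.

    #include <linux/cpumask.h>
    #include <linux/interrupt.h>

    struct example_pmu {
            int irq;                /* overflow interrupt */
            unsigned int on_cpu;    /* CPU that owns the PMU context */
    };

    /* Keep the overflow IRQ on whichever CPU the events migrate to. */
    static int example_pmu_migrate_irq(struct example_pmu *pmu, unsigned int target)
    {
            pmu->on_cpu = target;
            return irq_set_affinity(pmu->irq, cpumask_of(target));
    }
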
index 6bb87f2..0ecd293 100644
@@ -97,7 +97,7 @@ slab_flags_t __kasan_never_merge(void)
        return 0;
 }
 
-void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
+void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
 {
        u8 tag;
        unsigned long i;
@@ -111,7 +111,7 @@ void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
        kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
 }
 
-void __kasan_free_pages(struct page *page, unsigned int order, bool init)
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
 {
        if (likely(!PageHighMem(page)))
                kasan_poison(page_address(page), PAGE_SIZE << order,
index 4004388..ed5e5b8 100644
@@ -238,6 +238,38 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
        return &alloc_meta->free_track[0];
 }
 
+void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
+{
+       /*
+        * This condition should match the one in post_alloc_hook() in
+        * page_alloc.c.
+        */
+       bool init = !want_init_on_free() && want_init_on_alloc(flags);
+
+       if (flags & __GFP_SKIP_KASAN_POISON)
+               SetPageSkipKASanPoison(page);
+
+       if (flags & __GFP_ZEROTAGS) {
+               int i;
+
+               for (i = 0; i != 1 << order; ++i)
+                       tag_clear_highpage(page + i);
+       } else {
+               kasan_unpoison_pages(page, order, init);
+       }
+}
+
+void kasan_free_pages(struct page *page, unsigned int order)
+{
+       /*
+        * This condition should match the one in free_pages_prepare() in
+        * page_alloc.c.
+        */
+       bool init = want_init_on_free();
+
+       kasan_poison_pages(page, order, init);
+}
+
 #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
 
 void kasan_set_tagging_report_once(bool state)
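
kasan_alloc_pages() in this hunk (apparently mm/kasan/hw_tags.c) becomes the hardware tag-based entry point: it records __GFP_SKIP_KASAN_POISON in the new page flag so the free path can skip poisoning, and honours __GFP_ZEROTAGS by clearing data and tags together via tag_clear_highpage(). A hedged sketch of an allocation that sets both flags; in this series they are aimed at user-page allocation paths on MTE systems rather than ordinary drivers, so the call below is purely illustrative:

#include <linux/gfp.h>

/* Illustrative only: request an order-0 page whose tags are cleared at
 * allocation time and which will not be KASAN-poisoned when freed.
 */
static struct page *example_alloc_tagged_page(void)
{
        return alloc_pages(GFP_HIGHUSER_MOVABLE | __GFP_ZEROTAGS |
                           __GFP_SKIP_KASAN_POISON, 0);
}
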
index 9df8e7f..9362938 100644 (file)
@@ -207,3 +207,10 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
 
        return &alloc_meta->free_track[i];
 }
+
+void kasan_tag_mismatch(unsigned long addr, unsigned long access_info,
+                       unsigned long ret_ip)
+{
+       kasan_report(addr, 1 << (access_info & 0xf), access_info & 0x10,
+                    ret_ip);
+}
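
kasan_tag_mismatch() is the out-of-line callback the compiler emits calls to when inline checks are disabled (see the hwasan-inline-all-checks=0 addition to the KASAN Makefile further down). The report arguments are unpacked from access_info exactly as the hunk shows; a small helper restating that encoding, with names of my own choosing:

#include <linux/types.h>

/* access_info encoding used above: bits 0-3 hold log2 of the access size,
 * bit 4 is set for writes. E.g. 0x13 decodes to an 8-byte write.
 */
static void decode_access_info(unsigned long access_info,
                               size_t *size, bool *is_write)
{
        *size = (size_t)1 << (access_info & 0xf);
        *is_write = !!(access_info & 0x10);
}
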
index a258cf4..0b8afbe 100644 (file)
@@ -106,7 +106,8 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                kasan_slab_free_mempool(element);
        else if (pool->alloc == mempool_alloc_pages)
-               kasan_free_pages(element, (unsigned long)pool->pool_data, false);
+               kasan_poison_pages(element, (unsigned long)pool->pool_data,
+                                  false);
 }
 
 static void kasan_unpoison_element(mempool_t *pool, void *element)
@@ -114,7 +115,8 @@ static void kasan_unpoison_element(mempool_t *pool, void *element)
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                kasan_unpoison_range(element, __ksize(element));
        else if (pool->alloc == mempool_alloc_pages)
-               kasan_alloc_pages(element, (unsigned long)pool->pool_data, false);
+               kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
+                                    false);
 }
 
 static __always_inline void add_element(mempool_t *pool, void *element)
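
The mempool hunk only follows the rename, but it is a reminder that for page-backed pools pool->pool_data stores the allocation order, which is what the casts above recover. A hedged usage sketch of such a pool; names and sizes are illustrative:

#include <linux/mempool.h>
#include <linux/errno.h>

static mempool_t *example_pool;

/* Reserve four order-0 pages; mempool_create_page_pool() stashes the order
 * in pool->pool_data, matching the casts in the hunk above.
 */
static int example_pool_init(void)
{
        example_pool = mempool_create_page_pool(4, 0);
        return example_pool ? 0 : -ENOMEM;
}
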
index 0422058..e7af86e 100644 (file)
@@ -382,7 +382,7 @@ int page_group_by_mobility_disabled __read_mostly;
 static DEFINE_STATIC_KEY_TRUE(deferred_pages);
 
 /*
- * Calling kasan_free_pages() only after deferred memory initialization
+ * Calling kasan_poison_pages() only after deferred memory initialization
  * has completed. Poisoning pages during deferred memory init will greatly
  * lengthen the process and cause problem in large memory systems as the
  * deferred pages initialization is done with interrupt disabled.
@@ -394,15 +394,12 @@ static DEFINE_STATIC_KEY_TRUE(deferred_pages);
  * on-demand allocation and then freed again before the deferred pages
  * initialization is done, but this is not likely to happen.
  */
-static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-                                               bool init, fpi_t fpi_flags)
+static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
 {
-       if (static_branch_unlikely(&deferred_pages))
-               return;
-       if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-                       (fpi_flags & FPI_SKIP_KASAN_POISON))
-               return;
-       kasan_free_pages(page, order, init);
+       return static_branch_unlikely(&deferred_pages) ||
+              (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+               (fpi_flags & FPI_SKIP_KASAN_POISON)) ||
+              PageSkipKASanPoison(page);
 }
 
 /* Returns true if the struct page for the pfn is uninitialised */
@@ -453,13 +450,11 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
        return false;
 }
 #else
-static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-                                               bool init, fpi_t fpi_flags)
+static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
 {
-       if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-                       (fpi_flags & FPI_SKIP_KASAN_POISON))
-               return;
-       kasan_free_pages(page, order, init);
+       return (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+               (fpi_flags & FPI_SKIP_KASAN_POISON)) ||
+              PageSkipKASanPoison(page);
 }
 
 static inline bool early_page_uninitialised(unsigned long pfn)
@@ -1226,10 +1221,16 @@ out:
        return ret;
 }
 
-static void kernel_init_free_pages(struct page *page, int numpages)
+static void kernel_init_free_pages(struct page *page, int numpages, bool zero_tags)
 {
        int i;
 
+       if (zero_tags) {
+               for (i = 0; i < numpages; i++)
+                       tag_clear_highpage(page + i);
+               return;
+       }
+
        /* s390's use of memset() could override KASAN redzones. */
        kasan_disable_current();
        for (i = 0; i < numpages; i++) {
@@ -1245,7 +1246,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
                        unsigned int order, bool check_free, fpi_t fpi_flags)
 {
        int bad = 0;
-       bool init;
+       bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
 
        VM_BUG_ON_PAGE(PageTail(page), page);
 
@@ -1314,10 +1315,17 @@ static __always_inline bool free_pages_prepare(struct page *page,
         * With hardware tag-based KASAN, memory tags must be set before the
         * page becomes unavailable via debug_pagealloc or arch_free_page.
         */
-       init = want_init_on_free();
-       if (init && !kasan_has_integrated_init())
-               kernel_init_free_pages(page, 1 << order);
-       kasan_free_nondeferred_pages(page, order, init, fpi_flags);
+       if (kasan_has_integrated_init()) {
+               if (!skip_kasan_poison)
+                       kasan_free_pages(page, order);
+       } else {
+               bool init = want_init_on_free();
+
+               if (init)
+                       kernel_init_free_pages(page, 1 << order, false);
+               if (!skip_kasan_poison)
+                       kasan_poison_pages(page, order, init);
+       }
 
        /*
         * arch_free_page() can make the page's contents inaccessible.  s390
@@ -2324,8 +2332,6 @@ static bool check_new_pages(struct page *page, unsigned int order)
 inline void post_alloc_hook(struct page *page, unsigned int order,
                                gfp_t gfp_flags)
 {
-       bool init;
-
        set_page_private(page, 0);
        set_page_refcounted(page);
 
@@ -2344,10 +2350,16 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
         * kasan_alloc_pages and kernel_init_free_pages must be
         * kept together to avoid discrepancies in behavior.
         */
-       init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
-       kasan_alloc_pages(page, order, init);
-       if (init && !kasan_has_integrated_init())
-               kernel_init_free_pages(page, 1 << order);
+       if (kasan_has_integrated_init()) {
+               kasan_alloc_pages(page, order, gfp_flags);
+       } else {
+               bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
+
+               kasan_unpoison_pages(page, order, init);
+               if (init)
+                       kernel_init_free_pages(page, 1 << order,
+                                              gfp_flags & __GFP_ZEROTAGS);
+       }
 
        set_page_owner(page, order, gfp_flags);
 }
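
Both page_alloc hunks above pivot on kasan_has_integrated_init(): when the hardware tag-based mode initializes memory as a side effect of setting tags, zeroing is delegated to kasan_alloc_pages()/kasan_free_pages(), and the init conditions computed there must stay in sync with post_alloc_hook() and free_pages_prepare(), as the comments in the KASAN hunks note. A sketch of the shape that predicate is expected to take; the exact guards in include/linux/kasan.h may differ:

/* Sketch only: memory initialization is "integrated" into KASAN when the
 * hardware tag-based mode is built in and enabled at runtime.
 */
#ifdef CONFIG_KASAN_HW_TAGS
static inline bool kasan_has_integrated_init(void)
{
        return kasan_enabled();
}
#else
static inline bool kasan_has_integrated_init(void)
{
        return false;
}
#endif
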
index 3d79190..801c415 100644 (file)
@@ -50,6 +50,7 @@ endif
 CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
                $(call cc-param,hwasan-instrument-stack=$(stack_enable)) \
                $(call cc-param,hwasan-use-short-granules=0) \
+               $(call cc-param,hwasan-inline-all-checks=0) \
                $(instrumentation_flags)
 
 endif # CONFIG_KASAN_SW_TAGS
index 45e8aa3..cb55878 100755 (executable)
@@ -7,7 +7,8 @@ trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT
 cat << "END" | $CC -c -x c - -o $tmp_file.o >/dev/null 2>&1
 void *p = &p;
 END
-$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file
+$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr \
+  --use-android-relr-tags -o $tmp_file
 
 # Despite printing an error message, GNU nm still exits with exit code 0 if it
 # sees a relr section. So we need to check that nothing is printed to stderr.
index b29cbc6..76e1385 100644 (file)
@@ -25,7 +25,7 @@ int main(int argc, char **argv)
        ksft_set_plan(2);
 
        if (!(getauxval(AT_HWCAP) & HWCAP_SVE))
-               ksft_exit_skip("SVE not available");
+               ksft_exit_skip("SVE not available\n");
 
        /*
         * Enumerate up to SVE_VQ_MAX vector lengths