From: Will Deacon <will@kernel.org>
Date: Thu, 24 Jun 2021 13:03:24 +0000 (+0100)
Subject: Merge branch 'for-next/insn' into for-next/core
X-Git-Tag: v5.15~891^2~9
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=181a126979307a0192f41a4a1fac235d6f4ac9f0;p=platform%2Fkernel%2Flinux-starfive.git

Merge branch 'for-next/insn' into for-next/core

Refactoring of our instruction decoding routines and addition of some
missing encodings.

* for-next/insn:
  arm64: insn: avoid circular include dependency
  arm64: insn: move AARCH64_INSN_SIZE into <asm/insn.h>
  arm64: insn: decouple patching from insn code
  arm64: insn: Add load/store decoding helpers
  arm64: insn: Add some opcodes to instruction decoder
  arm64: insn: Add barrier encodings
  arm64: insn: Add SVE instruction class
  arm64: Move instruction encoder/decoder under lib/
  arm64: Move aarch32 condition check functions
  arm64: Move patching utilities out of instruction encoding/decoding
---

181a126979307a0192f41a4a1fac235d6f4ac9f0
diff --cc arch/arm64/kernel/Makefile
index de43420,03e8311..cce3085
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@@ -28,7 -22,7 +28,8 @@@ obj-y := debug-monitors.o entry.o irq
  			   return_address.o cpuinfo.o cpu_errata.o		\
  			   cpufeature.o alternative.o cacheinfo.o		\
  			   smp.o smp_spin_table.o topology.o smccc-call.o	\
- 			   syscall.o proton-pack.o idreg-override.o idle.o
 -			   syscall.o proton-pack.o idreg-override.o patching.o
++			   syscall.o proton-pack.o idreg-override.o idle.o	\
++			   patching.o
  
  targets += efi-entry.o
  
diff --cc arch/arm64/kernel/patching.c
index 0000000,9a6edb9..771f543
mode 000000,100644..100644
--- a/arch/arm64/kernel/patching.c
+++ b/arch/arm64/kernel/patching.c
@@@ -1,0 -1,150 +1,150 @@@
+ // SPDX-License-Identifier: GPL-2.0-only
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+ #include <linux/smp.h>
+ #include <linux/spinlock.h>
+ #include <linux/stop_machine.h>
+ #include <linux/uaccess.h>
+ 
+ #include <asm/cacheflush.h>
+ #include <asm/fixmap.h>
+ #include <asm/insn.h>
+ #include <asm/kprobes.h>
+ #include <asm/patching.h>
+ #include <asm/sections.h>
+ 
+ static DEFINE_RAW_SPINLOCK(patch_lock);
+ 
+ static bool is_exit_text(unsigned long addr)
+ {
+ 	/* discarded with init text/data */
+ 	return system_state < SYSTEM_RUNNING &&
+ 		addr >= (unsigned long)__exittext_begin &&
+ 		addr < (unsigned long)__exittext_end;
+ }
+ 
+ static bool is_image_text(unsigned long addr)
+ {
+ 	return core_kernel_text(addr) || is_exit_text(addr);
+ }
+ 
+ static void __kprobes *patch_map(void *addr, int fixmap)
+ {
+ 	unsigned long uintaddr = (uintptr_t) addr;
+ 	bool image = is_image_text(uintaddr);
+ 	struct page *page;
+ 
+ 	if (image)
+ 		page = phys_to_page(__pa_symbol(addr));
+ 	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+ 		page = vmalloc_to_page(addr);
+ 	else
+ 		return addr;
+ 
+ 	BUG_ON(!page);
+ 	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
+ 			(uintaddr & ~PAGE_MASK));
+ }
+ 
+ static void __kprobes patch_unmap(int fixmap)
+ {
+ 	clear_fixmap(fixmap);
+ }
+ /*
+  * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
+  * little-endian.
+  */
+ int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
+ {
+ 	int ret;
+ 	__le32 val;
+ 
+ 	ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
+ 	if (!ret)
+ 		*insnp = le32_to_cpu(val);
+ 
+ 	return ret;
+ }
+ 
+ static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
+ {
+ 	void *waddr = addr;
+ 	unsigned long flags = 0;
+ 	int ret;
+ 
+ 	raw_spin_lock_irqsave(&patch_lock, flags);
+ 	waddr = patch_map(addr, FIX_TEXT_POKE0);
+ 
+ 	ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
+ 
+ 	patch_unmap(FIX_TEXT_POKE0);
+ 	raw_spin_unlock_irqrestore(&patch_lock, flags);
+ 
+ 	return ret;
+ }
+ 
+ int __kprobes aarch64_insn_write(void *addr, u32 insn)
+ {
+ 	return __aarch64_insn_write(addr, cpu_to_le32(insn));
+ }
+ 
+ int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
+ {
+ 	u32 *tp = addr;
+ 	int ret;
+ 
+ 	/* A64 instructions must be word aligned */
+ 	if ((uintptr_t)tp & 0x3)
+ 		return -EINVAL;
+ 
+ 	ret = aarch64_insn_write(tp, insn);
+ 	if (ret == 0)
 -		__flush_icache_range((uintptr_t)tp,
++		caches_clean_inval_pou((uintptr_t)tp,
+ 					     (uintptr_t)tp + AARCH64_INSN_SIZE);
+ 
+ 	return ret;
+ }
+ 
+ struct aarch64_insn_patch {
+ 	void		**text_addrs;
+ 	u32		*new_insns;
+ 	int		insn_cnt;
+ 	atomic_t	cpu_count;
+ };
+ 
+ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+ {
+ 	int i, ret = 0;
+ 	struct aarch64_insn_patch *pp = arg;
+ 
+ 	/* The first CPU becomes master */
+ 	if (atomic_inc_return(&pp->cpu_count) == 1) {
+ 		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
+ 			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
+ 							     pp->new_insns[i]);
+ 		/* Notify other processors with an additional increment. */
+ 		atomic_inc(&pp->cpu_count);
+ 	} else {
+ 		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
+ 			cpu_relax();
+ 		isb();
+ 	}
+ 
+ 	return ret;
+ }
+ 
+ int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
+ {
+ 	struct aarch64_insn_patch patch = {
+ 		.text_addrs = addrs,
+ 		.new_insns = insns,
+ 		.insn_cnt = cnt,
+ 		.cpu_count = ATOMIC_INIT(0),
+ 	};
+ 
+ 	if (cnt <= 0)
+ 		return -EINVAL;
+ 
+ 	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
+ 				       cpu_online_mask);
+ }
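
The fixmap dance in patch_map() exists because kernel text is mapped
read-only: FIX_TEXT_POKE0 gives the patching CPU a temporary writable
alias of the target page, serialised by patch_lock, and the caches are
then cleaned and invalidated to the Point of Unification
(caches_clean_inval_pou() is the for-next/core rename of the old
__flush_icache_range(), hence the trivial conflict above). As a rough
illustration of the API kept here, a hypothetical caller, ignoring
caller-context details such as CPU-hotplug locking, might look like
the sketch below. patch_one_nop() is made up for this sketch; real
users such as ftrace, kprobes and jump labels derive their addresses
from their own metadata. The prototypes live in <asm/patching.h> after
the "decouple patching from insn code" change, while the instruction
generators stay in <asm/insn.h>:

	#include <asm/insn.h>
	#include <asm/patching.h>

	static int patch_one_nop(void *addr)
	{
		u32 nop = aarch64_insn_gen_nop();

		/*
		 * The write happens in the stop_machine() callback
		 * aarch64_insn_patch_text_cb(), so no other CPU can be
		 * executing the instruction while it is rewritten.
		 */
		return aarch64_insn_patch_text(&addr, &nop, 1);
	}
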
diff --cc arch/arm64/kernel/traps.c
index af94199,8f66072..b03e383
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@@ -45,6 -46,111 +46,104 @@@
  #include <asm/system_misc.h>
  #include <asm/sysreg.h>
  
+ static bool __kprobes __check_eq(unsigned long pstate)
+ {
+ 	return (pstate & PSR_Z_BIT) != 0;
+ }
+ 
+ static bool __kprobes __check_ne(unsigned long pstate)
+ {
+ 	return (pstate & PSR_Z_BIT) == 0;
+ }
+ 
+ static bool __kprobes __check_cs(unsigned long pstate)
+ {
+ 	return (pstate & PSR_C_BIT) != 0;
+ }
+ 
+ static bool __kprobes __check_cc(unsigned long pstate)
+ {
+ 	return (pstate & PSR_C_BIT) == 0;
+ }
+ 
+ static bool __kprobes __check_mi(unsigned long pstate)
+ {
+ 	return (pstate & PSR_N_BIT) != 0;
+ }
+ 
+ static bool __kprobes __check_pl(unsigned long pstate)
+ {
+ 	return (pstate & PSR_N_BIT) == 0;
+ }
+ 
+ static bool __kprobes __check_vs(unsigned long pstate)
+ {
+ 	return (pstate & PSR_V_BIT) != 0;
+ }
+ 
+ static bool __kprobes __check_vc(unsigned long pstate)
+ {
+ 	return (pstate & PSR_V_BIT) == 0;
+ }
+ 
+ static bool __kprobes __check_hi(unsigned long pstate)
+ {
+ 	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
+ 	return (pstate & PSR_C_BIT) != 0;
+ }
+ 
+ static bool __kprobes __check_ls(unsigned long pstate)
+ {
+ 	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
+ 	return (pstate & PSR_C_BIT) == 0;
+ }
+ 
+ static bool __kprobes __check_ge(unsigned long pstate)
+ {
+ 	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
+ 	return (pstate & PSR_N_BIT) == 0;
+ }
+ 
+ static bool __kprobes __check_lt(unsigned long pstate)
+ {
+ 	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
+ 	return (pstate & PSR_N_BIT) != 0;
+ }
+ 
+ static bool __kprobes __check_gt(unsigned long pstate)
+ {
+ 	/*PSR_N_BIT ^= PSR_V_BIT */
+ 	unsigned long temp = pstate ^ (pstate << 3);
+ 
+ 	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
+ 	return (temp & PSR_N_BIT) == 0;
+ }
+ 
+ static bool __kprobes __check_le(unsigned long pstate)
+ {
+ 	/*PSR_N_BIT ^= PSR_V_BIT */
+ 	unsigned long temp = pstate ^ (pstate << 3);
+ 
+ 	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
+ 	return (temp & PSR_N_BIT) != 0;
+ }
+ 
+ static bool __kprobes __check_al(unsigned long pstate)
+ {
+ 	return true;
+ }
+ 
+ /*
+  * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
+  * it behaves identically to 0b1110 ("al").
+  */
+ pstate_check_t * const aarch32_opcode_cond_checks[16] = {
+ 	__check_eq, __check_ne, __check_cs, __check_cc,
+ 	__check_mi, __check_pl, __check_vs, __check_vc,
+ 	__check_hi, __check_ls, __check_ge, __check_lt,
+ 	__check_gt, __check_le, __check_al, __check_al
+ };
+ 
 -static const char *handler[] = {
 -	"Synchronous Abort",
 -	"IRQ",
 -	"FIQ",
 -	"Error"
 -};
 -
  int show_unhandled_signals = 0;
  
  static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
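
The shift tricks in the __check_*() helpers above fall out of the NZCV
layout in PSTATE: N is bit 31, Z bit 30, C bit 29 and V bit 28, so
shifting the flags word by the distance between two flags lands one
flag in the other's bit position and a condition collapses to a single
mask test. For example GE, signed greater-than-or-equal, is defined as
N == V; a commented restatement of __check_ge(), assuming the
PSR_*_BIT definitions from <asm/ptrace.h>:

	static bool check_ge(unsigned long pstate)
	{
		/* V (bit 28) << 3 lands under N (bit 31): N ^= V */
		pstate ^= (pstate << 3);

		/* GE <=> N == V <=> (N ^ V) == 0 */
		return (pstate & PSR_N_BIT) == 0;
	}

Likewise in __check_hi(), pstate >> 1 drops Z (bit 30) onto C (bit
29), so clearing C wherever Z is set leaves C readable as "C && !Z",
which is exactly the unsigned-higher condition.
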
diff --cc arch/arm64/lib/Makefile
index 01c596a,9cd8390..c97b10d
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@@ -1,9 -1,9 +1,9 @@@
  # SPDX-License-Identifier: GPL-2.0
  lib-y		:= clear_user.o delay.o copy_from_user.o	\
  		   copy_to_user.o copy_in_user.o copy_page.o	\
- 		   clear_page.o csum.o memchr.o memcpy.o	\
+ 		   clear_page.o csum.o insn.o memchr.o memcpy.o	\
 -		   memmove.o memset.o memcmp.o strcmp.o strncmp.o	\
 -		   strlen.o strnlen.o strchr.o strrchr.o tishift.o
 +		   memset.o memcmp.o strcmp.o strncmp.o strlen.o	\
 +		   strnlen.o strchr.o strrchr.o tishift.o
  
  ifeq ($(CONFIG_KERNEL_MODE_NEON), y)
  obj-$(CONFIG_XOR_BLOCKS)	+= xor-neon.o