From: Linus Torvalds
Date: Wed, 6 Jun 2018 20:49:25 +0000 (-0700)
Subject: Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm
X-Git-Tag: v4.19~881
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=311da4975894aab7a4bb94aa83f38f052d7ffda4;p=platform%2Fkernel%2Flinux-rpi3.git

Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

 - Initial round of Spectre variant 1 and variant 2 fixes for 32-bit ARM

 - Clang support improvements

 - nommu updates for v8 MPU

 - enable ARM_MODULE_PLTS by default to avoid problems loading modules
   with larger kernels

 - vmlinux.lds and dma-mapping cleanups

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (31 commits)
  ARM: spectre-v1: fix syscall entry
  ARM: spectre-v1: add array_index_mask_nospec() implementation
  ARM: spectre-v1: add speculation barrier (csdb) macros
  ARM: KVM: report support for SMCCC_ARCH_WORKAROUND_1
  ARM: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling
  ARM: spectre-v2: KVM: invalidate icache on guest exit for Brahma B15
  ARM: KVM: invalidate icache on guest exit for Cortex-A15
  ARM: KVM: invalidate BTB on guest exit for Cortex-A12/A17
  ARM: spectre-v2: warn about incorrect context switching functions
  ARM: spectre-v2: add firmware based hardening
  ARM: spectre-v2: harden user aborts in kernel space
  ARM: spectre-v2: add Cortex A8 and A15 validation of the IBE bit
  ARM: spectre-v2: harden branch predictor on context switches
  ARM: spectre: add Kconfig symbol for CPUs vulnerable to Spectre
  ARM: bugs: add support for per-processor bug checking
  ARM: bugs: hook processor bug checking into SMP and suspend paths
  ARM: bugs: prepare processor bug infrastructure
  ARM: add more CPU part numbers for Cortex and Brahma B15 CPUs
  ARM: 8774/1: remove no-op macro VMLINUX_SYMBOL()
  ARM: 8773/1: amba: Export amba_bustype
  ...
---

311da4975894aab7a4bb94aa83f38f052d7ffda4
diff --cc arch/arm/include/asm/kvm_mmu.h
index f675162,cf2eae5..c94d291
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@@ -309,25 -309,30 +309,46 @@@ static inline unsigned int kvm_get_vmid
 	return 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+				       gpa_t gpa, void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_read_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 static inline void *kvm_get_hyp_vector(void)
 {
-	return kvm_ksym_ref(__kvm_hyp_vector);
+	switch(read_cpuid_part()) {
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	case ARM_CPU_PART_CORTEX_A12:
+	case ARM_CPU_PART_CORTEX_A17:
+	{
+		extern char __kvm_hyp_vector_bp_inv[];
+		return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
+	}
+
+	case ARM_CPU_PART_BRAHMA_B15:
+	case ARM_CPU_PART_CORTEX_A15:
+	{
+		extern char __kvm_hyp_vector_ic_inv[];
+		return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
+	}
+#endif
+	default:
+	{
+		extern char __kvm_hyp_vector[];
+		return kvm_ksym_ref(__kvm_hyp_vector);
+	}
+	}
 }
 
 static inline int kvm_map_vectors(void)
diff --cc arch/arm/mm/fault.c
index 3203454,3b1ba00..84becc9
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@@ -163,8 -163,9 +163,11 @@@ __do_user_fault(struct task_struct *tsk
 {
 	struct siginfo si;
 
+	if (addr > TASK_SIZE)
+		harden_branch_predictor();
+
+	clear_siginfo(&si);
+
 #ifdef CONFIG_DEBUG_USER
 	if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
 	    ((user_debug & UDBG_BUS)  && (sig == SIGBUS))) {
diff --cc drivers/amba/bus.c
index 3ece711,12283bd..41b7064
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@@ -202,9 -194,10 +202,10 @@@ struct bus_type amba_bustype =
 	.dev_groups	= amba_dev_groups,
 	.match		= amba_match,
 	.uevent		= amba_uevent,
+	.dma_configure	= platform_dma_configure,
 	.pm		= &amba_pm,
-	.force_dma	= true,
 };
+EXPORT_SYMBOL_GPL(amba_bustype);
 
 static int __init amba_init(void)
 {
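
For readers following the spectre-v1 commits listed above (the array_index_mask_nospec()
implementation and the csdb speculation barrier macros), the sketch below shows the
generic bounds-clamping pattern those primitives support, using the array_index_nospec()
helper from <linux/nospec.h>. It is only an illustration of the pattern, not code from
this merge; example_table and example_lookup are hypothetical names.

/* Minimal sketch of the Spectre-v1 index clamping pattern (not from this merge). */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/nospec.h>

static int example_table[16];

static int example_lookup(unsigned long index)
{
	if (index >= ARRAY_SIZE(example_table))
		return -EINVAL;

	/*
	 * Clamp the index under speculation so a mispredicted bounds check
	 * cannot be abused to load out-of-bounds data (Spectre variant 1).
	 */
	index = array_index_nospec(index, ARRAY_SIZE(example_table));

	return example_table[index];
}

On 32-bit ARM, the mask behind array_index_nospec() comes from the
array_index_mask_nospec() implementation added in this series, which uses the new
CSDB barrier to keep the clamped value from being speculated past the bounds check.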